unity_model_loader.cpp

// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.

// Standard headers for the std types used below.
#include <cstddef>
#include <fstream>
#include <stdexcept>
#include <string>
#include <vector>

#include "ggml/ggml.h"
#include "ggml/ggml-alloc.h"

#include "common.h"
#include "common-ggml.h"
#include "unity_model_loader.h"
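
// Reads the serialized hyper-parameters into model.hparams and checks the
// end-of-struct sentinel so that a truncated or out-of-date checkpoint fails
// early instead of producing silently corrupted values.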
void unity_model_loader::load_hparams(fairseq2_model& model, std::ifstream& fin)
{
    auto* hparams = (unity_hparams*)model.hparams;
    read_unity_hparams(hparams, fin);
    if (hparams->__end_of_hparams__ != 6877961321223123048) {
        throw std::invalid_argument("hparams sentinel mismatch: corrupted or outdated model file");
    }
}
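
// The ggml context size is precomputed at export time and stored in the
// hparams (model_byte_size), so we only need to read it back here.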
std::size_t
unity_model_loader::compute_context_size(void* raw_hparams)
{
    auto* hparams = (unity_hparams*)raw_hparams;
    return hparams->model_byte_size;
}
struct UnityArch {
    struct TransformerDecoder text_decoder;
};
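
// Registers the tensors of the text decoder: each layer's self-attention
// block and its layer norm. The frontend embedding and the final layer_norm
// allocations are kept below as commented-out templates.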
void unity_model_loader::tensors_alloc(fairseq2_model& model)
{
    auto& hparams = *(unity_hparams*)model.hparams;
    auto& arch = (UnityArch&)model.arch;
    const auto ctx = model.ctx;
    auto tensors = model.tensors;

    const auto vocab_size = hparams.nllb_config__vocabulary_size;
    const auto model_dim = hparams.nllb_config__model_dim;

    // This can be simplified by adding syntax sugar.

    // frontend
    // arch.frontend_embed_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, vocab_size, model_dim);
    // tensor_map["text_decoder_frontend.embed.weight"] = arch.frontend_embed_w;

    // layers
    {
        const auto n_layers = hparams.nllb_config__num_decoder_layers;
        arch.text_decoder.layers = std::vector<TransformerDecoderLayer>(n_layers);
        auto& layers = arch.text_decoder.layers;
        auto num_heads = hparams.nllb_config__num_decoder_attn_heads;
        for (int i = 0; i < n_layers; ++i) {
            auto prefix = "text_decoder.layers." + std::to_string(i) + ".";
            MultiheadAttention_init(layers[i].self_attn, model, prefix + "self_attn", model_dim, num_heads);
            LayerNorm_init(layers[i].self_attn_norm, model, prefix + "self_attn_norm", model_dim);
        }
    }

    // layer_norm
    // arch.layer_norm_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model_dim);
    // tensor_map["text_decoder.layer_norm.weight"] = arch.layer_norm_w;
    // arch.layer_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model_dim);
    // tensor_map["text_decoder.layer_norm.bias"] = arch.layer_norm_b;
}
extern "C" void load_unity_ggml_file(fairseq2_model& model, const char* fname) {
    return load_fairseq2_ggml_file<unity_model_loader>(model, fname);
}
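
// Minimal usage sketch (illustrative only, not part of the original file):
// the caller is assumed to own a fairseq2_model instance and a path to a
// GGML checkpoint produced by the export script; the path below is made up.
//
//     fairseq2_model model;
//     load_unity_ggml_file(model, "/path/to/unity.ggml");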