# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# MIT_LICENSE file in the root directory of this source tree.

import dataclasses
import logging
import re
import struct
from enum import Enum
from io import BufferedWriter
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Union, final

import torch
from fairseq2.assets import AssetCard, asset_store, download_manager
from fairseq2.data.text import SentencePieceEncoder, SentencePieceTokenizerBase
from fairseq2.data.typing import PathLike
from fairseq2.models.transformer.frontend import TransformerEmbeddingFrontend
from fairseq2.models.utils import ModelLoader, TokenizerLoaderBase
from fairseq2.models.utils.checkpoint import convert_model_state_dict
from fairseq2.nn import SinusoidalPositionEncoder
from fairseq2.nn.transformer import RelativePositionalEncoding
from fairseq2.typing import Device, finaloverride

import ggml

Preprocessor = Callable[[Any], Any]

log = logging.getLogger("ggml_convert")


class ModelType(str, Enum):
    AUTO = "auto"  # inferred from the model name
    UNITY = "unity"
    NLLB = "nllb"
    BITEXT = "bitext"
    BITEXT_SCRIPTED = "bitext_scripted"


UNITY_SMALLER_MODELS = [
    "unity_nano",
    "unity_micro",
]  # Trained with fairseq2, with a custom dict (not the original NLLB one)

NLLB_2_UNITY_KEYMAP = {
    r"^encoder_frontend\.": r"text_encoder_frontend.",
    r"^encoder\.": r"text_encoder.",
    r"^decoder\.": r"text_decoder.",
    r"^decoder_frontend\.": r"text_decoder_frontend.",
}
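
# Illustrative example of what this keymap does when applied with re.sub over
# state-dict keys (the key name below is hypothetical):
#   "encoder.layers.0.self_attn.output_proj.weight"
#     -> "text_encoder.layers.0.self_attn.output_proj.weight"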


@final
class NllbLikeTokenizer(SentencePieceTokenizerBase):
    """Like NllbTokenizer, except it does not add <pad> to the control symbol
    list. Since NllbTokenizer is declared final, we cannot inherit from it
    directly, so nearly everything is copied here."""

    langs: Set[str]
    default_lang: str

    def __init__(
        self, pathname: PathLike, langs: Sequence[str], default_lang: str
    ) -> None:
  52. """
  53. :param pathname:
  54. The pathname of the SentencePiece model file.
  55. :param langs:
  56. The list of supported languages.
  57. :param default_lang:
  58. The fall-back language if no language is specified.
  59. """
  60. # Each language is represented by a `__lang__` control symbol.
  61. control_symbols = [f"__{lang}__" for lang in langs]
  62. # Internal control symbols that are not relevant for eval use.
  63. control_symbols.extend(["<MINED_DATA>", "<MMT_BT_DATA>", "<SMT_BT_DATA>"])
  64. super().__init__(pathname, control_symbols)
  65. self.langs = set(langs)
  66. self.default_lang = default_lang
  67. @finaloverride
  68. def create_encoder(
  69. self,
  70. *,
  71. task: Optional[str] = None,
  72. lang: Optional[str] = None,
  73. mode: Optional[str] = None,
  74. device: Optional[Device] = None,
  75. pin_memory: bool = False,
  76. ) -> SentencePieceEncoder:
  77. """Create a token encoder.
  78. :param task:
  79. Must be 'translation'. If ``None``, defaults to 'translation'.
  80. :param lang:
  81. A language from :attr:`langs`. If ``None``, defaults to
  82. :attr:`default_lang`.
  83. :param mode:
  84. Must be 'source' or 'target'. Set to 'source' if ``lang`` is the
  85. source language; set to 'target' if ``lang`` is the target language.
  86. If ``None``, defaults to 'source'.
  87. :param device:
  88. The device on which to construct tensors.
  89. :param pin_memory:
  90. If ``True``, uses pinned memory while constructing tensors.
  91. """
  92. if task is not None and task != "translation":
  93. raise ValueError(f"`task` must be 'translation', but is '{task}' instead.")
  94. if lang is None:
  95. lang = self.default_lang
  96. if lang not in self.langs:
  97. raise ValueError(
  98. f"`lang` must be a supported language, but is '{lang}' instead."
  99. )
  100. if mode is None or mode == "source":
  101. # NLLB models expect a language token in place of BOS in source
  102. # sequences.
  103. prefix_tokens = [f"__{lang}__"]
  104. suffix_tokens = ["</s>"]
  105. elif mode == "source_mining":
  106. prefix_tokens = [f"__{lang}__", "<MINED_DATA>"]
  107. suffix_tokens = ["</s>"]
  108. elif mode == "source_mmt_bt":
  109. prefix_tokens = [f"__{lang}__", "<MMT_BT_DATA>"]
  110. suffix_tokens = ["</s>"]
  111. elif mode == "source_smt_bt":
  112. prefix_tokens = [f"__{lang}__", "<SMT_BT_DATA>"]
  113. suffix_tokens = ["</s>"]
  114. elif mode == "target":
  115. # Target sequences are expected to start with an EOS, followed by
  116. # the language token.
  117. prefix_tokens = ["</s>", f"__{lang}__"]
  118. suffix_tokens = []
  119. else:
  120. raise ValueError(
  121. f"`mode` must be 'source' or 'target', but is '{mode}' instead."
  122. )
  123. return SentencePieceEncoder(
  124. self.model,
  125. prefix_tokens=prefix_tokens,
  126. suffix_tokens=suffix_tokens,
  127. device=device,
  128. pin_memory=pin_memory,
  129. )


@final
class NllbLikeTokenizerLoader(TokenizerLoaderBase[NllbLikeTokenizer]):
    """Loads tokenizers used by NLLB models."""

    @finaloverride
    def _load(self, pathname: Path, card: AssetCard) -> NllbLikeTokenizer:
        langs = card.field("langs").as_list(str)
        default_lang = card.field("default_lang").as_(str)
        return NllbLikeTokenizer(pathname, langs, default_lang)


def convert_unity_model(
    model_name: str,
    hparams: Optional[Dict[str, Any]] = None,
):
    from seamless_communication.models import unity
    from seamless_communication.models.unity.builder import UnitYConfig, create_unity_model
    from seamless_communication.models.unity.model import UnitYModel

    load_unity_model_without_conversion = ModelLoader[UnitYModel, UnitYConfig](
        asset_store,
        download_manager,
        unity.load_unity_config,
        create_unity_model,
        None,
        restrict_checkpoints=False,
    )

    model_config = unity.load_unity_config(model_name)
    hparams = flatten_config(
        dataclasses.asdict(model_config), separator="__", overrides=hparams
    )
    hparams["multilingual"] = True
    log.info(hparams)

    # We need to diverge here because the current default in seamless_communication
    # is to convert from the fairseq1 checkpoint format.
    if model_name in UNITY_SMALLER_MODELS:
        model = load_unity_model_without_conversion(model_name)
        tokenizer = NllbLikeTokenizerLoader(asset_store, download_manager)(model_name)
    else:
        model = unity.load_unity_model(model_name)
        tokenizer = unity.load_unity_text_tokenizer(model_name)

    vocab = read_vocab_from_tokenizer(tokenizer)
    return model, hparams, vocab


def convert_nllb_model(
    model_name: str,
    hparams: Optional[Dict[str, Any]] = None,
):
    from fairseq2.models.nllb.loader import load_nllb_config, load_nllb_model, load_nllb_tokenizer

    model_config = load_nllb_config(model_name)
    hparams = flatten_config(
        dataclasses.asdict(model_config), separator="__", overrides=hparams,
    )
    hparams["multilingual"] = True

    model = load_nllb_model(model_name)
    tokenizer = load_nllb_tokenizer(model_name)
    vocab = read_vocab_from_tokenizer(tokenizer)
    return model, hparams, vocab


def convert_bitext_model(
    model_name: str,
    src_vocab: str,
    tgt_vocab: str,
    hparams: Optional[Dict[str, Any]] = None,
):
    from fairseq2.models.nllb.loader import load_nllb_config, load_nllb_model
    import sentencepiece as spm
    from torch.ao.quantization.qconfig import default_dynamic_qconfig, float_qparams_weight_only_qconfig

    model_config = load_nllb_config(model_name)
    hparams = flatten_config(
        dataclasses.asdict(model_config), separator="__", overrides=hparams,
    )
    hparams["multilingual"] = False

    model = load_nllb_model(model_name)

    # Quantize the non-scripted model to optimize the output size.
    torch.ao.quantization.quantize_dynamic(
        model,
        {
            torch.nn.Linear: default_dynamic_qconfig,
            torch.nn.Embedding: float_qparams_weight_only_qconfig,
        },
        dtype=torch.qint8,
        inplace=True,
    )

    def _read_vocab(vocab_file: str) -> List[Tuple[str, float]]:
        sp = spm.SentencePieceProcessor(vocab_file)
        return [
            (sp.id_to_piece(id), sp.get_score(id)) for id in range(sp.get_piece_size())  # type: ignore[no-member]
        ]

    src_vocab_list = _read_vocab(src_vocab)
    tgt_vocab_list = _read_vocab(tgt_vocab)
    return model, hparams, src_vocab_list, tgt_vocab_list


def convert_model(
    model_name: Union[str, torch.nn.Module],
    out: Optional[Path] = None,
    model_type: ModelType = ModelType.AUTO,
    layers: str = "",
    hparams: Optional[Dict[str, Any]] = None,
    vocab: Optional[str] = None,  # optional vocabulary file if stored separately
    extra_vocab: Optional[str] = None,  # additional vocabulary, e.g. for target languages in bilingual models
    fp16: bool = False,
) -> None:
    """
    Entry point for converting different kinds of models into GGML files. Supported model checkpoints:
    - unity models
    - nllb models
    - bilingual encoder-decoder models (PyTorch) with separate vocabularies for src and tgt languages
    - bilingual encoder-decoder models (torchscript)

    Args:
        model_name: name of a registered model (discoverable in a fairseq2 asset), path to a checkpoint,\
            or the model object passed directly
        out: path to store the converted .ggml model. If None, the ggml model is stored in the same place\
            as the input model
        model_type: type of the model (or inferred from the name; only applies to nllb, unity and seamless)
        layers: wildcard patterns to filter the layers of the model. Not applied to scripted models
        hparams: override the hparams in the model with user-defined values
        vocab: path to the vocabulary file (in case it is not bundled with the model checkpoint)
        extra_vocab: path to an additional vocabulary file (used in bilingual models with explicit tgt languages)
        fp16: save GGML tensors as float16 instead of float32
    """
    key_map: Optional[Dict[str, str]] = None
    tgt_vocab: Optional[List[Tuple[str, float]]] = None
    if isinstance(model_name, str):
        # Load the corresponding fairseq2 model
        if out is None:
            out = Path(model_name).with_suffix(".ggml")

        # Infer the model architecture from the model name or user input
        try:
            if model_type == ModelType.AUTO:
                if "unity" in model_name or "seamlessM4T" in model_name:
                    model_type = ModelType.UNITY
                elif "nllb" in model_name:
                    model_type = ModelType.NLLB

            assert (
                model_type != ModelType.AUTO
            ), "Cannot infer model type from the `model_name`. Please specify `model_type`"

            if model_type == ModelType.UNITY:
                model, hparams, vocab = convert_unity_model(model_name, hparams=hparams)
            elif model_type == ModelType.NLLB:
                model, hparams, vocab = convert_nllb_model(model_name, hparams=hparams)
                key_map = NLLB_2_UNITY_KEYMAP
            elif model_type == ModelType.BITEXT_SCRIPTED:
                # TODO: implement the EdgeML model conversion here
                raise NotImplementedError("Scripted model conversion not implemented yet")
            # Bilingual non-scripted model
            else:
                assert (
                    vocab and extra_vocab
                ), "non-scripted model requires vocabulary files (SPM Protobuf format)"
                model, hparams, vocab, tgt_vocab = convert_bitext_model(
                    model_name, hparams=hparams, src_vocab=vocab, tgt_vocab=extra_vocab
                )
                key_map = NLLB_2_UNITY_KEYMAP
        except Exception as exc:
            raise ValueError(f"Error in loading model: {model_name}") from exc
    else:
        # Use the model passed explicitly
        assert (
            out is not None
        ), "output path is required when explicitly passing a module"
        hparams = hparams or {}
        model = model_name

    state_dict = model.state_dict()
    if layers:
        state_dict = {k: v for k, v in state_dict.items() if re.match(layers, k)}
    fixup_model(model, state_dict, layer_filter=layers)
    if key_map:
        state_dict = convert_model_state_dict(state_dict, key_map=key_map)
    layer_config = read_layer_config(model, layer_filter=layers, key_map=key_map)

    vocab = vocab or []
    tgt_vocab = tgt_vocab or []
    write_ggml_file(out, hparams, layer_config, state_dict=state_dict, vocab=vocab, tgt_vocab=tgt_vocab, fp16=fp16)


def find_children(model: torch.nn.Module, t: type, layer_filter: str = "") -> List[Tuple[str, torch.nn.Module]]:
    queue = list(model._modules.items())
    modules = []
    while queue:
        name, node = queue.pop()
        if node is None:
            continue
        if layer_filter and not re.match(layer_filter, name):
            continue
        if isinstance(node, t):
            modules.append((name, node))
        for child_name, child_node in node._modules.items():
            queue.append((".".join((name, child_name)), child_node))

    return modules
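
# For instance (illustrative), find_children(model, torch.nn.LayerNorm) returns
# pairs like ("text_encoder.layers.0.self_attn_layer_norm", LayerNorm(...)) for
# every matching submodule, using dotted names relative to `model`.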


def fixup_model(model: torch.nn.Module, state_dict: Dict[str, torch.Tensor], layer_filter: str) -> None:
    # Bake the embedding scaling into the weights
    frontends = find_children(model, TransformerEmbeddingFrontend, layer_filter)
    if frontends:
        log.info(
            "Upgrading the following TransformerEmbeddingFrontend: %s",
            [x[0] for x in frontends],
        )
    for name, frontend in frontends:
        embed_weights = state_dict[name + ".embed.weight"]
        state_dict[name + ".embed.weight"] = embed_weights * frontend.scale

    # Sinusoidal embeddings are typically not saved since they are easily recomputed,
    # but saving them here lets us avoid porting the sinusoidal logic to GGML.
    pos_encoders = find_children(model, SinusoidalPositionEncoder, layer_filter)
    if pos_encoders:
        log.info(
            "Upgrading the following SinusoidalPositionEncoder: %s",
            [x[0] for x in pos_encoders],
        )
    for name, pos_encoder in pos_encoders:
        assert isinstance(pos_encoder.freqs, torch.Tensor)
        assert name not in state_dict
        state_dict[name] = pos_encoder.freqs

    relative_pos_encs = find_children(model, RelativePositionalEncoding, layer_filter)
    # speech_encoder has several copies of the relative_pos_enc module.
    # For efficiency reasons we only make one copy of it in GGML.
    if relative_pos_encs:
        log.info("Merging all speech_encoder RelativePositionalEncoding into one.")
        _, rel_pos_enc = relative_pos_encs[0]
        assert isinstance(rel_pos_enc.freqs, torch.Tensor)
        state_dict["speech_encoder.pos_enc"] = rel_pos_enc.freqs


def read_vocab_from_tokenizer(tokenizer: Any) -> List[Tuple[str, float]]:
    vocab_info = tokenizer.vocab_info
    # The tokenizer does not expose piece scores here, so the negative index is
    # used as a rank-preserving stand-in score.
    vocab = [
        (tokenizer.model.index_to_token(i).replace("▁", " "), -i)
        for i in range(vocab_info.size)
    ]
    return vocab  # type: ignore[return-value]


def write_ggml_file(
    out: Path,
    hparams: Dict[str, Any],
    layer_config: Dict[str, Any],
    state_dict: Dict[str, torch.Tensor],
    vocab: List[Tuple[str, float]],
    tgt_vocab: Optional[List[Tuple[str, float]]] = None,  # tgt_vocab for bilingual models
    fp16: bool = False,
) -> None:
    with out.open("wb") as o:
        write_ggml_header(o)
        write_hparams(o, hparams)
        write_hparams(o, layer_config)
        write_vocab(o, vocab)
        write_state_dict(o, state_dict, fp16)
        write_vocab(o, tgt_vocab or [])
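
# Overall layout of the resulting file, as written by the calls above:
#   magic "ggml" (reversed) | hparams | layer_config | vocab
#   | state_dict tensors | tgt_vocab (empty for multilingual models)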


def write_ggml_header(out: BufferedWriter) -> None:
    """Write the GGML magic (reversed, so it reads back as big-endian)."""
    out.write(b"ggml"[::-1])


def write_hparams(out: BufferedWriter, hparams: Dict[str, Any]) -> None:
    """Write hyper-parameters.

    :param hparams:
        flattened dict containing the model's hyper-parameters.
    """
    simple_vals = {}
    for key, value in hparams.items():
        try:
            simple_vals[key] = to_ctype(value)
        except ValueError:
            log.warning(f"Skipping config for key {key}={value!r}")
            continue

    out.write(struct.pack("<q", len(simple_vals)))
    for key, (ctype, cvalue) in simple_vals.items():
        write_string(out, key)
        b = struct.pack(ctype, cvalue)
        assert len(b) == 8
        out.write(b)

    log.info(f"Saved {len(simple_vals)} params.")


def write_vocab(out: BufferedWriter, vocab: List[Tuple[str, float]]) -> None:
    out.write(struct.pack("<q", len(vocab)))
    if len(vocab) == 0:
        return

    # Write all words concatenated in a single buffer
    words = [bytes(w, "utf8") for w, score in vocab]
    packed_words = b"\0".join(words)
    # We use i32 to allow reusing the string-loading code
    packed_len = struct.pack("<i", len(packed_words))
    out.write(packed_len)
    out.write(packed_words)

    lengths = torch.tensor([len(w) for w in words], dtype=torch.int8)
    write_tensor(out, lengths)

    scores = torch.tensor([score for w, score in vocab], dtype=torch.float32)
    write_tensor(out, scores)
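
# Vocab section layout, as written above:
#   int64 entry count | int32 packed byte length | words joined by NUL
#   | int8 tensor of word lengths | float32 tensor of scores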


def write_state_dict(
    out: BufferedWriter, state_dict: Dict[str, torch.Tensor], fp16: bool
) -> None:
    """Write a PyTorch state dict.

    :param state_dict:
        state dict returned by a PyTorch model
    :param fp16:
        convert float32 tensors to float16 on disk
    """
    out.write(struct.pack("<q", len(state_dict)))
    # True size of each tensor (before downcasting to float16)
    true_byte_size = sum(x.numel() * x.element_size() for x in state_dict.values())
    out.write(struct.pack("<q", true_byte_size))

    GB = 1024**3
    if not fp16:
        log.warning(
            f"Saving a ggml file with {len(state_dict)} tensors, totalling {true_byte_size / GB:.3f}GB"
        )
    else:
        def _fp16_byte_size(x: torch.Tensor) -> int:
            full_byte_size = x.numel() * x.element_size()
            if x.dtype == torch.float32:
                full_byte_size //= 2
            return full_byte_size

        # Compressed size
        compressed_byte_size = sum(_fp16_byte_size(x) for x in state_dict.values())
        log.warning(
            f"Saving a ggml file with {len(state_dict)} tensors, totalling {true_byte_size / GB:.3f}GB"
            f". Compressed to {compressed_byte_size / GB:.3f}GB"
        )

    for key, value in state_dict.items():
        write_string(out, key)
        if key.endswith(".bias") and value.ndim == 1 and "adaptor" not in key:
            # GGML broadcasting isn't as powerful as numpy's, so store biases as 2D
            value = value.reshape(1, -1)
        if "pointwise_conv" in key:  # pointwise_conv / depthwise_conv
            value = value.squeeze(-1)
        if "depthwise_conv" in key:
            value = value.squeeze(1)
        if fp16 and value.dtype == torch.float32:
            value = value.to(torch.float16)
        write_tensor(out, value.contiguous())


def write_string(out: BufferedWriter, value: str) -> None:
    """Write a string in utf-8 format.

    :param value:
        string value to dump.
    """
    str_ = value.encode("utf-8")
    packed_len = struct.pack("<i", len(str_))
    assert len(packed_len) == 4
    out.write(packed_len)
    out.write(str_)


def write_tensor(out: BufferedWriter, value: torch.Tensor) -> None:
    """Write a torch tensor in GGML format.

    First we save the number of dimensions and the dtype.
    Then we save the data as a numpy array.

    :param value:
        Tensor to dump.
    """
    if value.dtype is torch.int64:
        # GGML doesn't have int64, downcast it
        value = value.to(dtype=torch.int32)

    if value.ndim == 0:
        # GGML doesn't support scalars as tensors.
        value = value.reshape(1)

    data = value.numpy()
    n_dims = data.ndim
    assert n_dims < 5, "ggml doesn't support tensors with more than 4 dims"
    assert n_dims >= 1, "ggml doesn't support 0-dim tensors"

    ftype = torch_to_ggml_type(value.dtype)
    out.write(struct.pack("<i", n_dims))
    out.write(struct.pack("<i", ftype))
    for i in range(n_dims):
        # ggml uses long for the shape
        out.write(struct.pack("<q", data.shape[n_dims - 1 - i]))

    data.tofile(out)
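
# Per-tensor layout, as written above:
#   int32 n_dims | int32 ftype (GGML_TYPE_*) | int64 shape[n_dims]
#   (innermost dimension first) | raw row-major data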


def torch_to_ggml_type(dtype: torch.dtype) -> int:
    if dtype is torch.float32:
        return ggml.GGML_TYPE_F32
    elif dtype is torch.float16:
        return ggml.GGML_TYPE_F16
    elif dtype is torch.int32:
        return ggml.GGML_TYPE_I32
    elif dtype is torch.int8:
        return ggml.GGML_TYPE_I8
    else:
        raise NotImplementedError(f"{dtype} is not mapped to a GGML_TYPE")


def flatten_config(
    config: Dict[str, Any],
    separator: str,
    overrides: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Flatten a nested dictionary.

    :param config:
        nested dictionary containing the model config.
    :param separator:
        string separator used when flattening nested hparams
    :param overrides:
        values overriding the flattened config entries
    :returns:
        flat dictionary
    """

    def __flatten(config: Dict[str, Any], prefix: str = "") -> Dict[str, Any]:
        result = {}
        for key in config:
            new_key = f"{prefix}{key}"
            if isinstance(config[key], dict):
                nested_result = __flatten(config[key], f"{new_key}{separator}")
                result.update(nested_result)
            else:
                new_config = config[key]
                if new_config is not None:
                    result[new_key] = config[key]

        return result

    res_config = __flatten(config)
    if overrides:
        return {**res_config, **overrides}
    else:
        return res_config
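
# Illustrative behaviour (hypothetical config values):
#   flatten_config({"model_dim": 1024, "encoder": {"num_layers": 24}}, separator="__")
#     -> {"model_dim": 1024, "encoder__num_layers": 24}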


def read_layer_config(
    model: torch.nn.Module, layer_filter: str, key_map: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
    layer_config = {}

    def _append_node_config(node: Any, prefix: str) -> None:
        for k, v in node.__dict__.items():
            # Skip special members. In particular all children modules and tensors
            # will be hidden in the special dicts `_parameters` and `_modules`.
            if k.startswith("_"):
                continue
            # All modules have a "training" flag
            if k in ("training", "init_fn"):
                continue
            if v is None:
                continue
            try:
                to_ctype(v)
            except ValueError:
                log.warning(f"Skipping layer config {k}={v!r}")
                continue
            layer_config[prefix + k] = v

    _append_node_config(model, "")
    for name, node in find_children(model, torch.nn.Module, layer_filter):
        _append_node_config(node, name + ".")

    key_map = key_map or {}
    keys_to_replace = []
    for k, v in layer_config.items():
        for old_pattern, replacement in key_map.items():
            if (new_key := re.sub(old_pattern, replacement, k)) != k:
                keys_to_replace.append((k, new_key))

    for old_key, new_key in keys_to_replace:
        layer_config[new_key] = layer_config.pop(old_key)

    return layer_config


def to_ctype(value: Any) -> Tuple[str, Any]:
    """Transform a python type into a ctype.

    Note: we always use little-endian, 8-byte types.
    This makes the format independent of the current platform.

    :param value:
        value to cast into ctype
    :returns:
        A tuple of ctype and cvalue.
    """
    # Check bool before int: bool is a subclass of int, so the int branch
    # would otherwise shadow it (both serialize the same way regardless).
    if isinstance(value, bool):
        return ("<q", value)
    if isinstance(value, int):
        return ("<q", value)
    if isinstance(value, float):
        return ("<d", value)
    if isinstance(value, Enum):
        return ("<q", value.value)
    if isinstance(value, tuple) and len(value) == 1:
        return to_ctype(value[0])
    if isinstance(value, str) and len(value) < 8:
        value = bytes(value, "ascii")
        if len(value) < 8:
            value = value + (8 - len(value)) * b"\0"
        return ("8s", value)

    raise ValueError(f"Unsupported type {type(value)}")
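
# Examples of the (ctype, cvalue) pairs produced (illustrative):
#   to_ctype(24)     -> ("<q", 24)                      # 8-byte little-endian int
#   to_ctype(0.1)    -> ("<d", 0.1)                     # 8-byte double
#   to_ctype("relu") -> ("8s", b"relu\x00\x00\x00\x00") # zero-padded 8-byte string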


def get_cpp_type(value: Any) -> str:
    """Return the equivalent cpp type in string format.

    :param value:
        value to cast into ctype
    :returns:
        str containing the cpp type
    """
    # used to have compatibility between types
    try:
        ctype, _ = to_ctype(value)
    except ValueError as e:
        return f"// Error: {e}"

    # Match the format strings actually returned by `to_ctype` above.
    if ctype == "<q":
        return "std::int64_t"
    if ctype == "<d":
        return "double"
    if ctype == "8s":
        return "std::array<char, 8>"

    raise RuntimeError(
        f"Should not have reached this part. Missing cpp translation for {ctype}"
    )


def generate_hparams_struct(
    hparams: Dict[str, Any],
    struct_name: str,
) -> str:
    """Generate a c++ struct to hold the model hyper-parameters.

    :param hparams:
        Flattened config of the model.
    :param struct_name:
        Name of the generated struct.
    """
    struct = f"struct {struct_name} {{"
    fields = [f" {get_cpp_type(value)} {key};" for key, value in hparams.items()]
    struct = "\n".join([struct] + fields + ["};\n"])

    valid_fields = [
        key for key, value in hparams.items() if "Error" not in get_cpp_type(value)
    ]
    read_struct = f"void read_{struct_name}({struct_name}& out, std::ifstream &fin) {{"
    read_fields = [
        f" fin.read((char*) &out.{field}, sizeof(out.{field}));"
        for field in valid_fields
    ]
    read_struct = "\n".join([read_struct] + read_fields + ["};\n"])

    return "\n".join([struct, read_struct])
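
# Illustrative output for hparams={"model_dim": 1024} and struct_name="unity_hparams"
# (assuming the std::int64_t mapping in get_cpp_type above):
#   struct unity_hparams {
#    std::int64_t model_dim;
#   };
#   void read_unity_hparams(unity_hparams& out, std::ifstream &fin) {
#    fin.read((char*) &out.model_dim, sizeof(out.model_dim));
#   };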


if __name__ == "__main__":
    import func_argparse

    func_argparse.single_main(convert_model)
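
# Example invocation (model name illustrative; flags are derived from
# `convert_model`'s signature by func_argparse):
#   python ggml_convert.py --model_name seamlessM4T_medium --fp16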