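"""Tests for the unity.cpp / fairseq2 ggml port.

Each test builds a small ggml graph through the Python bindings and compares
its output against the reference fairseq2 PyTorch implementation
(seamlessM4T_medium).
"""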
import ggml
import ctypes
import torch
import pytest
import numpy as np
import fairseq2.nn
import fairseq2.nn.transformer
import logging
import sys
import functools
from typing import Any, Iterator, Tuple
from pathlib import Path
from ctypes import c_void_p
from ctypes_utils import Ptr, NULLPTR
from ggml import NativeObj
from ggml_convert import convert_model, read_layer_config
from seamless_communication.models.inference.translator import Translator, Modality
from fairseq2.data.audio import WaveformToFbankConverter
from fairseq2.models.wav2vec2.feature_extractor import Wav2Vec2FbankFeatureExtractor
import torchaudio

Ctx = ggml.ggml_context_p

UNITY_MODELS = Path(__file__).parent / "examples/unity/models"
CTX_PARAMS = ggml.ggml_init_params(mem_size=1024 * 1024 * 1024 * 5, mem_buffer=None)
FAIRSEQ2_CPP = Path(__file__).parent / "examples/unity/fairseq2.cpp"
UNITY_FLASH_ATTN = "\n# define UNITY_FLASH_ATTN 0\n" not in FAIRSEQ2_CPP.read_text()
DATA = Path(__file__).parent
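

# Every test gets a fresh ggml context, and the test body runs under
# torch.inference_mode() so the fairseq2 reference side never builds
# autograd graphs while we compare outputs.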
@pytest.fixture(name="ctx")
def _ctx() -> Iterator[Ctx]:
    """Allocate a new context with 5 GB of memory (see CTX_PARAMS)."""
    try:
        ctx = ggml.ggml_init(params=CTX_PARAMS)
        with torch.inference_mode():
            yield ctx
    finally:
        ggml.ggml_free(ctx)
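

# Converting seamlessM4T_medium to ggml is expensive, so the model is loaded
# once per session and only its inference context is swapped in per test.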
@functools.lru_cache()
def _load_g_model_once() -> NativeObj:
    model_file = Path(__file__).parent / "seamlessM4T_medium.ggml"
    if not model_file.exists():
        convert_model("seamlessM4T_medium", model_file)
    return ggml.load_fairseq2_ggml_file(model_file)


@pytest.fixture()
def g_model(ctx: Ctx) -> c_void_p:
    model = _load_g_model_once()
    ggml.lib.fairseq2_model_set_inference_ctx(model.ptr, ctx)
    return model.ptr


@functools.lru_cache(maxsize=1)
def load_translator() -> Translator:
    return Translator(
        "seamlessM4T_medium", "vocoder_36langs", torch.device("cpu"), torch.float32
    )


def load_pt_model() -> Any:
    return load_translator().model
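

# Conversion round-trip: a single fairseq2 Linear layer is written to a ggml
# file and its hyperparameters are read back through the C API.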
def test_convert_linear(tmp_path: Path) -> None:
    module = fairseq2.nn.Linear(16, 24, True)

    layer_config = read_layer_config(module)
    assert layer_config == {"input_dim": 16, "output_dim": 24, "skip_init": False}

    module_file = tmp_path / "module.ggml"
    convert_model(module, module_file)
    g_module = ggml.load_fairseq2_ggml_file(module_file)

    for k, v in layer_config.items():
        assert (
            ggml.fairseq2_model_layer_config_int(g_module.ptr, bytes(k, "ascii")) == v
        )


def test_causal_attention_mask(ctx: Ctx):
    x = torch.zeros((1, 10, 32))
    generator = fairseq2.nn.transformer.CausalAttentionMaskGenerator()
    mask_exp = generator(x).numpy()

    gx = ggml.from_numpy(ctx, x)
    gmask = ggml.causal_attention_mask(ctx, gx)
    mask = ggml.to_numpy(gmask)
    gf = ggml.ggml_build_forward(gmask)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    assert mask_exp.shape == (10, 10)
    assert mask.shape == (10, 10)
    assert np.all(mask == mask_exp)

    x = x[:, :8, :]
    mask_exp = generator(x).numpy()
    gx = ggml.from_numpy(ctx, x)
    gmask = ggml.causal_attention_mask(ctx, gx)
    mask = ggml.to_numpy(gmask)
    gf = ggml.ggml_build_forward(gmask)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    assert mask_exp.shape == (8, 8)
    assert mask.shape == (8, 8)
    assert np.all(mask == mask_exp)
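

# Note: ggml.to_numpy() gives a view over the tensor's buffer, which is why
# the tests above can call it before the graph is computed: the values only
# become valid once ggml_graph_compute_with_ctx() has run.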
def test_LayerNorm_forward(ctx: Ctx, g_model: c_void_p) -> None:
    x = torch.empty((2, 21, 1024))
    torch.nn.init.uniform_(x, -1, 1)

    pt_model = load_pt_model()
    y_exp = pt_model.text_encoder.layers[0].ffn_layer_norm(x).numpy()
    gx = ggml.from_numpy(ctx, x)
    gy = ggml.forward("LayerNorm", g_model, "text_encoder.layers.0.ffn_layer_norm", gx)
    ggml.build_and_compute(ctx, gy)

    y = ggml.to_numpy(gy)
    assert np.allclose(y_exp, y, atol=1e-5)
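

# ggml.forward() dispatches on the fairseq2 layer class name plus the dotted
# prefix of that layer's weights inside the loaded model. The
# ggml.build_and_compute(ctx, gy) helper used here is shorthand for the
# manual pattern used elsewhere in this file:
#   gf = ggml.ggml_build_forward(gy)
#   ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)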
def test_Linear_forward(ctx: Ctx, g_model: c_void_p) -> None:
    x = torch.empty((2, 21, 1024))
    torch.nn.init.uniform_(x, -1, 1)

    pt_model = load_pt_model()
    y_exp = pt_model.text_encoder.layers[0].ffn.inner_proj(x).numpy()
    gx = ggml.from_numpy(ctx, x)
    gy = ggml.forward("Linear", g_model, "text_encoder.layers.0.ffn.inner_proj", gx)
    ggml.build_and_compute(ctx, gy)

    y = ggml.to_numpy(gy)
    assert np.allclose(y_exp, y, atol=1e-5)


def test_FeedForwardNetwork_forward(ctx: Ctx, g_model: c_void_p) -> None:
    x = torch.empty((2, 21, 1024))  # (bs, seq_len, model_dim)
    torch.nn.init.uniform_(x, -1 / 32, 1 / 32)

    # Test FFN without LayerNorm
    pt_model = load_pt_model()
    y_exp = pt_model.text_encoder.layers[0].ffn(x).numpy()
    gx = ggml.from_numpy(ctx, x)
    gy = ggml.forward(
        "StandardFeedForwardNetwork", g_model, "text_encoder.layers.0.ffn", gx
    )
    ggml.build_and_compute(ctx, gy)

    y = ggml.to_numpy(gy)
    assert np.allclose(y_exp, y, atol=1e-5)
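

# The attention test is parametrized over (qlen, klen) pairs: (11, 21) has
# fewer queries than keys (decoder-style cross attention); (21, 13) has more
# queries than keys, which ggml's flash attention kernel cannot handle,
# hence the skip below.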
@pytest.mark.parametrize("lengths", [(11, 21), (21, 13)])
def test_MultiheadAttention_forward(
    ctx: Ctx, g_model: c_void_p, lengths: Tuple[int, int]
) -> None:
    x = torch.empty((2, 21, 1024))
    torch.random.manual_seed(0)
    torch.nn.init.uniform_(x, -1, 1)

    # Note: we use different lengths for queries and keys,
    # so this also exercises the implementation in a decoding context.
    # Note2: ggml_flash_attn requires that we have more keys than queries.
    qlen, klen = lengths
    xq = x[:, :qlen]
    xk = x[:, :klen]
    if qlen > klen and UNITY_FLASH_ATTN:
        pytest.skip(reason="flash_attn requires qlen <= klen")

    gxq = ggml.from_numpy(ctx, xq.contiguous())
    gxk = ggml.from_numpy(ctx, xk.contiguous())
    ggml.ggml_set_name(gxk, b"xk")
    gy = ggml.forward(
        "MultiheadAttention",
        g_model,
        "text_encoder.layers.0.self_attn",
        gxq,
        gxk,
        gxk,
        NULLPTR,  # TODO: test with causal attention masks
    )
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    pt_model = load_pt_model()
    self_attn = pt_model.text_encoder.layers[0].self_attn
    q_exp = self_attn.q_proj(xq).numpy()

    y = ggml.to_numpy(gy)
    nodes = ggml.nodes(gf)

    attn_weights_hook = fairseq2.nn.transformer.StoreAttentionWeights([])
    self_attn.register_attn_weight_hook(attn_weights_hook)

    y_exp = self_attn(xq, None, xk, xk).numpy()

    q = ggml.to_numpy(nodes[b"q"])
    assert q.shape == q_exp.shape
    assert np.allclose(q_exp, q, atol=1e-5)

    # With flash_attn we don't have attn_weights.
    naive_attn = b"attn_weights" in nodes
    if naive_attn:
        attn_weights = ggml.to_numpy(nodes[b"attn_weights"])
        [attn_weights_exp] = attn_weights_hook._storage
        attn_weights_exp = attn_weights_exp.numpy()
        assert attn_weights_exp.shape == attn_weights.shape
        # GGML very aggressively reduces small softmax weights to 0,
        # so the element-wise error isn't that small...
        assert np.allclose(attn_weights_exp, attn_weights, atol=1e-3)
        # ...but the row sums should still be close to 1...
        assert np.allclose(np.sum(attn_weights, axis=-1), np.ones((2 * 16, qlen)))
        # ...and the argmax should match the reference implementation.
        assert np.allclose(
            np.argmax(attn_weights_exp, axis=-1), np.argmax(attn_weights, axis=-1)
        )

    assert y.shape == y_exp.shape
    assert np.allclose(y_exp, y, atol=1e-2 if naive_attn else 1e-4)
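

# Incremental decoding: feed the decoder self-attention one query position at
# a time and check the ggml KV cache against fairseq2's IncrementalStateBag.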
def test_MultiheadAttention_forward_self_attn_with_cache(
    ctx: Ctx, g_model: c_void_p
) -> None:
    pt_model = load_pt_model()
    attn = pt_model.text_decoder.layers[0].self_attn

    x = torch.empty((2, 21, 1024))
    torch.random.manual_seed(0)
    torch.nn.init.uniform_(x, -1, 1)

    state_bag = fairseq2.nn.IncrementalStateBag()
    ggml.fairseq2_kv_cache_alloc(g_model, 2, 21)
    # Incremental decoding
    for t in range(3):
        xq = x[:, t : t + 1]

        y_exp = attn(xq, None, xq, xq, state_bag=state_bag).numpy()
        assert y_exp.shape == (2, 1, 1024)

        gxq = ggml.from_numpy(ctx, xq.contiguous())
        ggml.ggml_set_name(gxq, b"xq")
        gy = ggml.forward(
            "MultiheadAttention",
            g_model,
            "text_decoder.layers.0.self_attn",
            gxq,
            gxq,
            gxq,
            None,  # type: ignore
        )
        gf = ggml.ggml_build_forward(gy)
        ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)
        nodes = ggml.nodes(gf)

        state = state_bag.get_state(
            attn, fairseq2.nn.transformer.MultiheadAttentionState
        )
        assert state is not None
        assert np.allclose(
            state.prev_k.numpy(),
            ggml.to_numpy(
                nodes[b"text_decoder.layers.0.self_attn.k_cache (step=%d)" % t]
            ),
            atol=1e-3,
        )

        y = ggml.to_numpy(gy)
        assert np.allclose(y, y_exp, atol=1e-2)


def test_MultiheadAttention_forward_cross_attn_with_cache(
    ctx: Ctx, g_model: c_void_p
) -> None:
    pt_model = load_pt_model()
    attn = pt_model.text_decoder.layers[0].encoder_decoder_attn

    x = torch.empty((2, 21, 1024))
    torch.random.manual_seed(0)
    torch.nn.init.uniform_(x, -1, 1)

    state_bag = fairseq2.nn.IncrementalStateBag()
    ggml.fairseq2_kv_cache_alloc(g_model, 2, 21)
    # Incremental decoding: the keys come from the encoder and don't change
    # during decoding.
    xk = x[:, :11]
    gxk = ggml.from_numpy(ctx, xk.contiguous(), name=b"xk")

    for t in range(3):
        xq = x[:, t : t + 1]
        gxq = ggml.from_numpy(ctx, xq.contiguous())
        ggml.ggml_set_name(gxq, b"xq")
        gy = ggml.forward(
            "MultiheadAttention",
            g_model,
            "text_decoder.layers.0.encoder_decoder_attn",
            gxq,
            gxk,
            gxk,
            None,  # type: ignore
        )
        gf = ggml.ggml_build_forward(gy)
        ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)
        y = ggml.to_numpy(gy)
        nodes = ggml.nodes(gf)
        leaves = ggml.leafs(gf)

        if t > 0:
            # The cache only appears in the graph from the second call onwards.
            state = state_bag.get_state(
                attn, fairseq2.nn.transformer.MultiheadAttentionState
            )
            assert state is not None
            assert np.allclose(
                state.prev_k.numpy(),
                ggml.to_numpy(
                    nodes[b"text_decoder.layers.0.encoder_decoder_attn.k_cache"]
                ),
                atol=1e-3,
            )

        y_exp = attn(xq, None, xk, xk, state_bag=state_bag).numpy()
        assert y_exp.shape == (2, 1, 1024)
        assert np.allclose(y, y_exp, atol=1e-2)
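

# Whole-block check: one StandardTransformerEncoderLayer (self-attention,
# FFN, residuals and LayerNorms) compared against fairseq2 in a single graph.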
def test_StandardTransformerEncoderLayer_forward(ctx: Ctx, g_model: c_void_p) -> None:
    x = torch.empty((2, 21, 1024))
    padding_mask = torch.ones((2, 21))
    torch.random.manual_seed(0)
    torch.nn.init.uniform_(x, -1, 1)

    pt_model = load_pt_model()
    layer = pt_model.text_encoder.layers[0]

    gx = ggml.from_numpy(ctx, x)
    ggml.ggml_set_name(gx, b"x")
    gpad = ggml.from_numpy(ctx, padding_mask)
    ggml.ggml_set_name(gpad, b"padding_mask")
    gy = ggml.forward(
        "StandardTransformerEncoderLayer",
        g_model,
        "text_encoder.layers.0",
        gx,
        None,  # TODO: support padding mask
    )
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    y = ggml.to_numpy(gy)

    y_exp, _ = layer(x, padding_mask)
    y_exp = y_exp.numpy()

    assert y.shape == y_exp.shape
    assert np.allclose(y_exp, y, atol=1e-4 if UNITY_FLASH_ATTN else 1e-2)


def test_StandardConformerEncoderLayer_forward(ctx: Ctx, g_model: c_void_p) -> None:
    pt_model = load_pt_model()
    x = torch.load(
        "/private/home/dnn/internal_sc/seamless_communication/ggml/examples/unity/dev/seqs_before_conformer_block.pt"
    )
    padding_mask = torch.ones((1, x.shape[1]))
    layer = pt_model.speech_encoder.inner.layers[0]

    gx = ggml.from_numpy(ctx, x[0])
    ggml.ggml_set_name(gx, b"x")
    gpad = ggml.from_numpy(ctx, padding_mask[0])
    ggml.ggml_set_name(gpad, b"padding_mask")
    gy = ggml.forward(
        "StandardConformerEncoderLayer",
        g_model,
        "speech_encoder.inner.layers.0",
        gx,
        None,  # TODO: support padding mask
    )
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    y = ggml.to_numpy(gy)

    y_exp, _ = layer(x, padding_mask)
    y_exp = y_exp.numpy()

    assert y.shape == y_exp.shape
    assert np.allclose(y_exp, y, atol=2e-3)


def test_StandardConformerEncoderAdaptorLayer_forward(
    ctx: Ctx, g_model: c_void_p
) -> None:
    pt_model = load_pt_model()
    x = torch.load(
        "/private/home/dnn/internal_sc/seamless_communication/ggml/examples/unity/dev/seqs_before_adaptor.pt"
    )
    layer = pt_model.speech_encoder.adaptor_layers[0]

    gx = ggml.from_numpy(ctx, x[0])
    ggml.ggml_set_name(gx, b"x")
    gy = ggml.forward(
        "StandardConformerEncoderAdaptorLayer",
        g_model,
        "speech_encoder.adaptor_layers.0",
        gx,
        None,  # TODO: support padding mask
    )
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    y = ggml.to_numpy(gy)

    y_exp, _ = layer(x, None)
    y_exp = y_exp.numpy()

    assert y.shape == y_exp.shape
    assert np.allclose(y_exp, y, atol=2e-3)


def test_StandardTransformerEncoder_forward(ctx: Ctx, g_model: c_void_p) -> None:
    x = torch.empty((2, 21, 1024))
    padding_mask = torch.ones((2, 21))
    torch.random.manual_seed(0)
    torch.nn.init.uniform_(x, -1, 1)

    gx = ggml.from_numpy(ctx, x)
    ggml.ggml_set_name(gx, b"x")
    gpad = ggml.from_numpy(ctx, padding_mask)
    ggml.ggml_set_name(gpad, b"padding_mask")
    gy = ggml.forward(
        "StandardTransformerEncoder",
        g_model,
        "text_encoder",
        gx,
        None,  # TODO: support padding mask
    )
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    y = ggml.to_numpy(gy)

    pt_model = load_pt_model()
    y_exp, _ = pt_model.text_encoder(x, padding_mask)
    y_exp = y_exp.numpy()

    assert y.shape == y_exp.shape
    assert np.allclose(y_exp, y, atol=1e-4)
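

# End-to-end speech encoder: the raw waveform is scaled by 2**15 before
# entering ggml, matching the waveform_scale of the WaveformToFbankConverter
# used to build the reference input below.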
def test_StandardConformerEncoder_forward(ctx: Ctx, g_model: c_void_p) -> None:
    pt_model = load_pt_model()
    wav, _ = torchaudio.load(DATA / "test.wav")
    gx = ggml.from_numpy(ctx, wav * 2**15)  # Apply scale before sending into ggml!
    ggml.ggml_set_name(gx, b"x")
    gy = ggml.forward(
        "StandardConformerEncoder",
        g_model,
        "speech_encoder",
        gx,
        None,  # TODO: support padding mask
    )
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    converter = WaveformToFbankConverter(
        num_mel_bins=80,
        waveform_scale=2**15,
        channel_last=True,
        standardize=True,
    )
    converter_input = {
        "waveform": wav.transpose(0, 1),
        "sample_rate": 16000.0,
        "format": -1,
    }

    y = ggml.to_numpy(gy)
    speech_encoder_input = pt_model.speech_encoder_frontend(
        converter(converter_input)["fbank"].unsqueeze(0), None
    )[0]

    y_exp, _ = pt_model.speech_encoder(speech_encoder_input, None)
    y_exp = y_exp.numpy()

    assert y.shape == y_exp.shape
    assert np.allclose(
        y_exp, y, atol=1e-2
    )  # There are ~10 elements in a 137x1024 tensor with error > 1e-2.


def test_WaveformToFbank_forward(ctx: Ctx, g_model: c_void_p) -> None:
    pt_model = load_pt_model()
    converter = WaveformToFbankConverter(
        num_mel_bins=80,
        waveform_scale=2**15,
        channel_last=True,
        standardize=True,
    )
    extractor = Wav2Vec2FbankFeatureExtractor(80, 2, 1)
    wav, _ = torchaudio.load(
        "/private/home/dnn/internal_sc/seamless_communication/ggml/examples/unity/test.wav"
    )
    gx = ggml.from_numpy(ctx, wav * 2**15)  # Apply scale before sending into ggml!
    ggml.ggml_set_name(gx, b"x")

    gy = ggml.forward("WaveformToFbank", g_model, "", gx)
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    y = ggml.to_numpy(gy)
    converter_input = {
        "waveform": wav.transpose(0, 1),
        "sample_rate": 16000.0,
        "format": -1,
    }
    y_exp = extractor(converter(converter_input)["fbank"].unsqueeze(0), None)[0]
    y_exp = y_exp.numpy()

    assert y.shape == y_exp.shape
    assert np.allclose(y_exp, y, atol=4e-3)  # reduce? error comes from standardization


def test_causal_attention_mask_2d(ctx: Ctx):
    # Same check as test_causal_attention_mask above, but with a 2D input.
    x = torch.zeros((5, 10))
    generator = fairseq2.nn.transformer.CausalAttentionMaskGenerator()
    mask_exp = generator(x)

    gx = ggml.from_numpy(ctx, x)
    gmask = ggml.causal_attention_mask(ctx, gx)
    mask = ggml.to_numpy(gmask)
    gf = ggml.ggml_build_forward(gmask)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    assert mask_exp.shape == (10, 10)
    assert mask.shape == (10, 10)
    assert np.allclose(mask, mask_exp)


def test_PositionalEmbedding_forward(ctx: Ctx, g_model: c_void_p) -> None:
    seq = torch.zeros((4, 20, 1024), dtype=torch.float32)
    # This _legacy_pad_idx=0 looks suspicious (shouldn't the model use 1?), but
    # it is consistent with pt_model.text_decoder_frontend.pos_encoder._sin_offset.
    pos_encoder = fairseq2.nn.SinusoidalPositionEncoder(1024, 55, _legacy_pad_idx=0)
    y_exp = pos_encoder(seq, None)[0].numpy()

    gseq = ggml.from_numpy(ctx, seq[0].numpy())
    ggml.ggml_set_name(gseq, b"seq")
    gy = ggml.forward(
        "PositionalEmbedding", g_model, "text_decoder_frontend.pos_encoder", gseq
    )
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)
    y = ggml.to_numpy(gy)

    assert y.shape == y_exp.shape
    assert np.allclose(y_exp, y, atol=1e-6)


def test_TransformerEmbeddingFrontend_forward(ctx: Ctx, g_model: c_void_p) -> None:
    seq = torch.arange(2 * 20).reshape(2, 20)
    seq[1, 15:] = 0  # padding for the second sentence
    seq_len = torch.tensor([20, 15])
    gseq = ggml.from_numpy(ctx, seq.numpy().astype(np.int32))
    ggml.ggml_set_name(gseq, b"seq")

    gy = ggml.forward(
        "TransformerEmbeddingFrontend", g_model, "text_decoder_frontend", gseq
    )
    ggml.build_and_compute(ctx, gy)
    y = ggml.to_numpy(gy)

    pt_model = load_pt_model()
    y_exp, _ = pt_model.text_decoder_frontend(seq, seq_len)
    y_exp = y_exp.numpy()

    assert y.shape == y_exp.shape
    assert np.allclose(y_exp, y, atol=1e-6)


def test_StandardTransformerDecoder_forward(ctx: Ctx, g_model: c_void_p) -> None:
    x = torch.empty((2, 13, 1024))
    encoder_out = torch.empty((2, 21, 1024))
    padding_mask = torch.ones((2, 13))
    torch.random.manual_seed(0)
    torch.nn.init.uniform_(x, -1, 1)
    torch.nn.init.uniform_(encoder_out, -1, 1)

    gx = ggml.from_numpy(ctx, x)
    ggml.ggml_set_name(gx, b"x")
    gpad = ggml.from_numpy(ctx, padding_mask)
    ggml.ggml_set_name(gpad, b"padding_mask")
    genc = ggml.from_numpy(ctx, encoder_out)
    gy = ggml.forward(
        "StandardTransformerDecoder",
        g_model,
        "text_decoder",
        gx,
        None,  # TODO: support padding mask
        genc,
        None,
    )
    ggml.build_and_compute(ctx, gy)
    y = ggml.to_numpy(gy)

    pt_model = load_pt_model()
    y_exp, _ = pt_model.text_decoder(x, padding_mask, encoder_out, None)
    y_exp = y_exp.numpy()

    assert y.shape == y_exp.shape
    assert np.allclose(y_exp, y, atol=1e-4 if UNITY_FLASH_ATTN else 1e-3)
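

# End-to-end text-to-text generation. On first run, reference hypotheses are
# produced with the fairseq2 Translator and cached in sample_input.npz; later
# runs replay that snapshot and only exercise the ggml beam search.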
def test_t2tt(ctx: Ctx, g_model: c_void_p) -> None:
    src_lang = "eng"
    src_text = "We are all in a yellow submarine."
    tgt_lang = "fra"
    sample_file = DATA / "sample_input.npz"
    beam_size = 2
    if not sample_file.exists():
        translator = load_translator()
        device = translator.device
        token_encoder = translator.text_tokenizer.create_encoder(
            task="translation", lang=src_lang, mode="source", device=device
        )
        src = translator.collate(token_encoder(src_text))

        text_out, _ = translator.get_prediction(
            translator.model,
            translator.text_tokenizer,
            translator.unit_tokenizer,
            src,
            input_modality=Modality.TEXT,
            output_modality=Modality.TEXT,
            tgt_lang=tgt_lang,
            beam_size=beam_size,
        )

        tgt_text = str(text_out.sentences[0])
        assert tgt_text == "Nous sommes tous dans un sous-marin jaune."
        hypotheses = [
            {
                "seq": h.seq.tolist(),
                "score": h.score.item(),
                "step_scores": h.step_scores.numpy(),
            }
            for h in text_out.generator_output.results[0]
        ]
        np.savez(
            sample_file,
            encoder_output=text_out.encoder_output.numpy(),
            encoder_padding_mask=text_out.encoder_padding_mask.numpy(),
            hypotheses=hypotheses,
        )

    # allow_pickle is needed to load the hypothesis dicts.
    text_out = np.load(sample_file, allow_pickle=True)

    encoder_out = ggml.from_numpy(ctx, text_out["encoder_output"])
    encoder_padding_mask = ggml.from_numpy(ctx, text_out["encoder_padding_mask"])
    prefix_seq = np.array(text_out["hypotheses"][0]["seq"][:2]).astype(np.int32)
    max_seq_len = max(len(h["seq"]) for h in text_out["hypotheses"])

    opts = ggml.SequenceGeneratorOptions(
        beam_size=beam_size,
        min_seq_len=1,
        soft_max_seq_len_a=1,
        soft_max_seq_len_b=200,
        hard_max_seq_len=int(max_seq_len * 1.5),
        len_penalty=1.0,
        unk_penalty=0.0,
        normalize_scores=True,
    )
    job = ggml.SequenceGeneratorJob(
        opts=opts,
        prefix_seq=ggml.from_numpy(ctx, prefix_seq),
        pad_idx=0,
        unk_idx=1,
        bos_idx=2,
        eos_idx=3,
    )

    result_ptr = ggml.generate_sequence(
        g_model, job, encoder_out, encoder_padding_mask, ctx
    )
    results = [result_ptr[i] for i in range(beam_size) if result_ptr[i].seq is not None]

    assert len(results) == len(text_out["hypotheses"])
    for g_hyp, exp in zip(results, text_out["hypotheses"]):
        g_tokens = list(ggml.to_numpy(g_hyp.seq))
        g_step_scores = ggml.to_numpy(g_hyp.step_scores)
        assert g_tokens == exp["seq"]
        assert g_hyp.score == pytest.approx(exp["score"], rel=1e-2)
        # The score error is big; this may negatively impact the beam search.
        assert np.allclose(g_step_scores, exp["step_scores"], atol=0.1)
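

# End-to-end speech-to-text generation. The commented-out block below shows
# how the expected tokens and score were obtained from the fairseq2 Translator.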
def test_s2tt(ctx: Ctx, g_model: c_void_p):
    src_audio_wav, _ = torchaudio.load(DATA / "test.wav")
    # translator = load_translator()
    # token_encoder = translator.text_tokenizer.create_encoder(task="translation")
    # decoded_audio = {
    #     "waveform": src_audio_wav.t(),
    #     "sample_rate": 16000.0,
    #     "format": -1,
    # }
    # src = translator.collate(translator.convert_to_fbank(decoded_audio))["fbank"]
    # text_out, _ = translator.get_prediction(
    #     translator.model,
    #     translator.text_tokenizer,
    #     translator.unit_tokenizer,
    #     src,
    #     input_modality=Modality.SPEECH,
    #     output_modality=Modality.TEXT,
    #     tgt_lang="cmn",
    # )
    # tgt_text = str(text_out.sentences[0])
    # assert tgt_text == "大家好 , 世界无主题。"
    # tgt_tokens = text_out.generator_output.results[0][0].seq
    # score = text_out.generator_output.results[0][0].score.item()
    tgt_tokens = [
        3, 256200, 16991, 249346, 249725, 146,
        25220, 251069, 249211, 251148, 253935, 3,
    ]  # "大家好 , 世界无主题。"
    score = -1.606838583946228

    gx = ggml.from_numpy(
        ctx, src_audio_wav * 2**15
    )  # Apply scale before sending into ggml!
    ggml.ggml_set_name(gx, b"x")
    gy = ggml.forward(
        "StandardConformerEncoder",
        g_model,
        "speech_encoder",
        gx,
        None,  # TODO: support padding mask
    )
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    encoder_out = gy

    opts = ggml.SequenceGeneratorOptions(
        beam_size=5,
        soft_max_seq_len_a=1,
        soft_max_seq_len_b=200,
        hard_max_seq_len=1000,
    )
    job = ggml.SequenceGeneratorJob(
        opts=opts,
        prefix_seq=ggml.from_numpy(ctx, np.array([3, 256200]).astype(np.int32)),
        pad_idx=0,
        unk_idx=1,
        bos_idx=2,
        eos_idx=3,
    )
    result_ptr = ggml.generate_sequence(g_model, job, encoder_out, NULLPTR, ctx)
    g_tokens = list(ggml.to_numpy(result_ptr[0].seq))
    assert g_tokens == tgt_tokens