import ctypes
from pathlib import Path
from typing import Any, Iterator

import fairseq2.nn
import fairseq2.nn.transformer
import numpy as np
import pytest
import torch

import ggml
from ggml import NativeObj
from ggml_convert import convert_model
from seamless_communication.models.unity import load_unity_model

Ctx = ggml.ggml_context_p

UNITY_MODELS = Path(__file__).parent / "examples/unity/models"
PARAMS_16MB = ggml.ggml_init_params(mem_size=16 * 1024 * 1024, mem_buffer=None)


@pytest.fixture(name="ctx")
def _ctx() -> Iterator[Ctx]:
    """Allocate a new context with 16 MB of memory."""
    try:
        ctx = ggml.ggml_init(params=PARAMS_16MB)
        yield ctx
    finally:
        ggml.ggml_free(ctx)


def test_ggml_bindings_work(ctx: Ctx) -> None:
    # Instantiate tensors
    x = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 1)
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 1)
    b = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 1)

    # Use ggml operations to build a computational graph
    x2 = ggml.ggml_mul(ctx, x, x)
    f = ggml.ggml_add(ctx, ggml.ggml_mul(ctx, a, x2), b)
    gf = ggml.ggml_build_forward(f)

    # Set the input values
    ggml.ggml_set_f32(x, 2.0)
    ggml.ggml_set_f32(a, 3.0)
    ggml.ggml_set_f32(b, 4.0)

    # Compute the graph
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    # Get the output value: f = a * x^2 + b = 3 * 4 + 4 = 16
    output = ggml.ggml_get_f32_1d(f, 0)
    assert output == 16.0


def test_ggml_matmul(ctx: Ctx) -> None:
    # Instantiate tensors
    a = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 4, 2)
    x = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 4, 3)

    # Use ggml operations to build a computational graph
    y = ggml.ggml_mul_mat(ctx, a, x)
    assert ggml.shape(y) == (3, 2)
    gf = ggml.ggml_build_forward(y)

    # Set the input values
    ggml.ggml_set_f32(x, 0.0)
    for i in range(4 * 3):
        ggml.ggml_set_f32_1d(x, i, i)
    ggml.ggml_set_f32(a, 0.0)
    ggml.ggml_set_f32_1d(a, 1, 1.0)
    ggml.ggml_set_f32_1d(a, 7, 1.0)

    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    output = [[ggml.ggml_get_f32_1d(y, j * 2 + i) for j in range(3)] for i in range(2)]
    assert output == [[1, 5, 9], [3, 7, 11]]


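# Illustration (not in the original suite): in numpy terms, ggml_mul_mat(ctx, a, b)
# computes to_numpy(b) @ to_numpy(a).T, since ggml lists dims innermost-first and
# the shared first ne axis is the reduction axis. A minimal sketch of that
# equivalence, reusing the from_numpy/to_numpy round-trip exercised below:
def test_mul_mat_matches_numpy_matmul(ctx: Ctx) -> None:
    a = np.arange(8, dtype=np.float32).reshape(2, 4)  # (dim_out, dim_in)
    x = np.arange(12, dtype=np.float32).reshape(3, 4)  # (seq_len, dim_in)
    gy = ggml.ggml_mul_mat(ctx, ggml.from_numpy(ctx, a), ggml.from_numpy(ctx, x))
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)
    assert np.allclose(ggml.to_numpy(gy), x @ a.T)

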
def test_shape_works(ctx: Ctx) -> None:
    """GGML's shape order convention is the reverse of numpy's."""
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 10)
    assert ggml.shape(a) == (10,)
    b = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 11, 21)
    assert ggml.shape(b) == (21, 11)
    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, 12, 22, 32)
    assert ggml.shape(c) == (32, 22, 12)


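# Illustration (not in the original suite): because ne is innermost-first,
# from_numpy has to reverse the numpy shape, so ggml.shape round-trips it back.
# A minimal sketch of that invariant:
def test_shape_round_trips_through_from_numpy(ctx: Ctx) -> None:
    a = np.zeros((7, 3, 2), dtype=np.float32)
    assert ggml.shape(ggml.from_numpy(ctx, a)) == (7, 3, 2)

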
def test_nb_works(ctx: Ctx) -> None:
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 10)
    assert ggml.nb(a) == (4, 40, 40, 40)

    b = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F16, 11, 21)
    assert ggml.nb(b) == (2, 22, 462, 462)

    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, 12, 22, 32)
    assert ggml.nb(c) == (4, 48, 1056, 33792)


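# Illustration (not in the original suite): the nb values above follow a simple
# rule: nb[0] is the element byte size, each next entry multiplies by that dim's
# extent, and unused trailing dims repeat the total byte size. A minimal sketch
# of that rule (expected_nb is a hypothetical helper, not a ggml API):
def expected_nb(itemsize: int, ne: tuple) -> tuple:
    nb = [itemsize]
    for d in ne:
        nb.append(nb[-1] * d)
    while len(nb) < 4:  # unused dims keep the last (total) stride
        nb.append(nb[-1])
    return tuple(nb[:4])


def test_nb_matches_expected_rule(ctx: Ctx) -> None:
    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, 12, 22, 32)
    assert ggml.nb(c) == expected_nb(4, (12, 22, 32))  # (4, 48, 1056, 33792)

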
@pytest.mark.xfail(reason="TODO: fix strides")
def test_strides_works(ctx: Ctx) -> None:
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 10)
    assert ggml.strides(a) == np.ones((10,), dtype=np.float32).strides
    b = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 11, 21)
    assert ggml.strides(b) == np.ones((11, 21), dtype=np.float32).strides
    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, 12, 22, 32)
    assert ggml.strides(c) == np.ones((12, 22, 32), dtype=np.float32).strides


def test_to_numpy_works_with_f32(ctx: Ctx) -> None:
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 10)
    na = ggml.to_numpy(a)
    for i in range(10):
        ggml.ggml_set_f32_1d(a, i, i)
    assert na[5] == 5
    assert np.allclose(na, np.array(range(10), dtype=np.float32))
    ggml.ggml_set_f32_1d(a, 5, -1.5)
    assert na[5] == -1.5

    # Note: GGML order of dims is reversed wrt numpy shapes
    b = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 11, 21)
    for i in range(11 * 21):
        ggml.ggml_set_f32_1d(b, i, i)
    nb = ggml.to_numpy(b)
    # assert nb.shape == (21, 11)
    assert nb[0, 5] == 5
    assert nb[3, 5] == 11 * 3 + 5
    assert np.allclose(
        nb, np.array(range(11 * 21), dtype=np.float32).reshape(ggml.shape(b))
    )
    ggml.ggml_set_f32_1d(b, 11 * 3 + 5, -1.5)
    assert nb[3, 5] == -1.5

    sum_rows = ggml.ggml_sum_rows(ctx, b)
    gf = ggml.ggml_build_forward(sum_rows)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)
    np_sum_rows = np.sum(nb, axis=-1, keepdims=True)
    assert np_sum_rows.shape == ggml.shape(sum_rows)
    for i in range(11):
        assert np_sum_rows[i] == ggml.ggml_get_f32_1d(sum_rows, i)

    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, 12, 22, 32)
    for i in range(12 * 22 * 32):
        ggml.ggml_set_f32_1d(c, i, i)
    nc = ggml.to_numpy(c)
    assert ggml.shape(c) == (32, 22, 12)
    assert nc[3, 5, 11] == 22 * 12 * 3 + 12 * 5 + 11
    assert np.allclose(
        nc, np.array(range(12 * 22 * 32), dtype=np.float32).reshape(ggml.shape(c))
    )
    ggml.ggml_set_f32_1d(c, 22 * 12 * 3 + 12 * 5 + 11, -1.5)
    assert nc[3, 5, 11] == -1.5


def test_from_numpy_works_with_f32(ctx: Ctx) -> None:
    a = np.random.normal(size=(10,)).astype(dtype=np.float32)
    ga = ggml.from_numpy(ctx, a)
    assert ggml.shape(ga) == (10,)
    assert ggml.nb(ga) == ggml.nb(ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 10))
    assert np.allclose(a, ggml.to_numpy(ga))

    a = np.random.normal(size=(11, 21)).astype(dtype=np.float32)
    ga = ggml.from_numpy(ctx, a)
    assert ggml.shape(ga) == (11, 21)
    assert ggml.nb(ga) == ggml.nb(
        ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, *a.shape[::-1])
    )
    assert np.allclose(a, ggml.to_numpy(ga))

    a = np.random.normal(size=(12, 22, 32)).astype(dtype=np.float32)
    ga = ggml.from_numpy(ctx, a)
    assert ggml.shape(ga) == (12, 22, 32)
    assert ggml.nb(ga) == ggml.nb(
        ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, *a.shape[::-1])
    )
    assert np.allclose(a, ggml.to_numpy(ga))


def test_to_numpy_works_with_f16(ctx: Ctx) -> None:
    # We explicitly fill the tensors, otherwise they might contain leftover
    # non-zero values.
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F16, 10)
    na = ggml.to_numpy(a)
    ggml.ggml_set_f32(a, 2.14)
    assert np.allclose(na, np.ones((10,), dtype=np.float16) * 2.14)
    ggml.ggml_set_f32(a, 4.28)
    assert np.allclose(na, np.ones((10,), dtype=np.float16) * 4.28)

    b = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F16, 11, 21)
    nb = ggml.to_numpy(b)
    ggml.ggml_set_f32(b, 4.18)
    assert np.allclose(nb, np.ones((21, 11), dtype=np.float16) * 4.18)
    ggml.ggml_set_f32(b, 5.12)
    assert np.allclose(nb, np.ones((21, 11), dtype=np.float16) * 5.12)

    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F16, 12, 22, 32)
    nc = ggml.to_numpy(c)
    ggml.ggml_set_f32(c, 3.16)
    assert np.allclose(nc, np.ones((32, 22, 12), dtype=np.float16) * 3.16)
    ggml.ggml_set_f32(c, 5.08)
    assert np.allclose(nc, np.ones((32, 22, 12), dtype=np.float16) * 5.08)


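# Illustration (not in the original suite): f16 can't represent 2.14 exactly,
# which is why the checks above compare against f16 arrays with np.allclose
# rather than exact equality. A minimal sketch:
def test_f16_rounding() -> None:
    assert np.float16(2.14) != np.float32(2.14)  # 2.140625 vs ~2.1400001
    assert np.allclose(np.float16(2.14), 2.14, rtol=1e-3)

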
def test_from_numpy_works_with_f16(ctx: Ctx) -> None:
    a = np.random.normal(size=(10,)).astype(dtype=np.float16)
    ga = ggml.from_numpy(ctx, a)
    assert np.allclose(a, ggml.to_numpy(ga))
    a = np.random.normal(size=(11, 21)).astype(dtype=np.float16)
    ga = ggml.from_numpy(ctx, a)
    assert np.allclose(a, ggml.to_numpy(ga))
    a = np.random.normal(size=(12, 22, 32)).astype(dtype=np.float16)
    ga = ggml.from_numpy(ctx, a)
    assert np.allclose(a, ggml.to_numpy(ga))


def test_to_numpy_works_with_transposed(ctx: Ctx) -> None:
    ga = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 10, 5)  # (5, 10) in numpy
    a = ggml.to_numpy(ga)
    a[...] = np.arange(50).reshape(5, 10).astype(dtype=np.float32)

    gat = ggml.ggml_transpose(ctx, ga)
    gf = ggml.ggml_build_forward(gat)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    at = ggml.to_numpy(gat)
    assert np.allclose(a.T, at)


def test_ning_model_load(ctx: Ctx) -> None:
    pytest.skip("broken")
    model, vocab = ggml.unity_model_load(UNITY_MODELS / "unity-large/ggml-model.bin")
    print(model, vocab)

    example = ggml.from_file(
        ctx, UNITY_MODELS / "unity-large/seqs_before_conformer_block.bin", (1024, 137)
    )
    with ggml.MeasureArena() as arena:
        graph = ggml.unity_audio_encoder_graph(model, example)
        # TODO: why the extra memory?
        mem_size = ggml.ggml_allocr_alloc_graph(arena, graph) + ggml.GGML_MEM_ALIGN

    with ggml.FixedSizeArena(mem_size) as allocr:
        print(
            f"unity_audio_encoder_graph: compute buffer size: {mem_size / 1024 / 1024} MB"
        )
        eval_res_ptr = ggml.unity_eval(allocr, model, example, 1)
        eval_res = eval_res_ptr.contents
        inpL = ggml.to_numpy(eval_res.nodes[eval_res.n_nodes - 1])
        expected_raw = "-0.1308,0.0346,-0.2656,0.2873,-0.0104,0.0574,0.4033,-0.1125,-0.0460,-0.0496"
        expected = map(float, expected_raw.split(","))
        assert np.allclose(inpL[0, :10], list(expected), atol=1e-4)


@pytest.fixture(scope="module")
def g_model_once() -> Iterator[ctypes.c_void_p]:
    model_file = Path(__file__).parent / "seamlessM4T_medium.ggml"
    if not model_file.exists():
        convert_model("seamlessM4T_medium", model_file)
    with ggml.load_unity_ggml_file(model_file) as model:
        yield model


@pytest.fixture()
def g_model(ctx: Ctx, g_model_once: ctypes.c_void_p) -> ctypes.c_void_p:
    ggml.lib.fairseq2_model_set_inference_ctx(g_model_once, ctx)
    return g_model_once


@pytest.fixture(scope="module")
def pt_model() -> Iterator[Any]:
    model = load_unity_model("seamlessM4T_medium")
    print(model)
    model.eval()
    with torch.inference_mode():
        yield model


@pytest.mark.xfail(reason="TODO")
def test_hparams_code_is_up_to_date() -> None:
    model_file = Path(__file__).parent / "seamlessM4T_medium.ggml"
    hparams_header_file = model_file.with_suffix(".hparams.h")
    hparams_struct = hparams_header_file.read_text().strip()
    actual_code = (UNITY_MODELS.parent / "unity_model_loader.h").read_text()
    assert hparams_struct in actual_code


def test_numpy_mul_mat(ctx: Ctx) -> None:
    slen, d_in, d_out = (5, 4, 2)
    # torch.nn and fairseq2.nn assume (seq_len, dim) inputs
    x = np.zeros((slen, d_in), dtype=np.float32)  # (seq_len, dim_in)
    x[0, :] = [1, 1 / 3, 0, 0]

    weight = np.eye(d_out, d_in, dtype=np.float32)
    weight[1, 1] = 1
    # assert weight.shape == (d_out, d_in)  # (dim_out, dim_in)
    y_exp = x @ weight.T  # (seq_len, dim_out)

    gx = ggml.from_numpy(ctx, x)  # (dim_in, seq_len)
    gw = ggml.from_numpy(ctx, weight)  # (dim_in, dim_out)
    # gb = ggml.from_numpy(ctx, linear.bias.numpy())  # (dim_out)
    # GGML linear impl
    assert ggml.ggml_can_mul_mat(gw, gx)
    # gy = ggml.ggml_add(ctx, ggml.ggml_mul_mat(ctx, gw, gx), gb)  # (dim_out, seq_len)
    gy = ggml.ggml_mul_mat(ctx, gw, gx)  # (dim_out, seq_len)

    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    y = ggml.to_numpy(gf.nodes[gf.n_nodes - 1])
    assert np.allclose(y_exp, y)


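# Illustration (not in the original suite): the layout above mirrors
# torch.nn.functional.linear, which also computes x @ weight.T. A minimal
# cross-check against torch, assuming no bias:
def test_numpy_mul_mat_matches_torch_linear() -> None:
    x = torch.randn(5, 4)
    weight = torch.randn(2, 4)  # (dim_out, dim_in)
    y = torch.nn.functional.linear(x, weight)  # x @ weight.T
    assert np.allclose(y.numpy(), x.numpy() @ weight.numpy().T, atol=1e-6)

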
@torch.no_grad()
def test_torch_spda_vs_ggml_flash_attn(ctx: Ctx) -> None:
    slen, d_in, num_heads = (5, 4, 2)
    torch.random.manual_seed(0)
    q = torch.zeros((num_heads, slen, d_in))
    torch.nn.init.uniform_(q, -1, 1)
    k = torch.zeros((num_heads, slen, d_in))
    torch.nn.init.uniform_(k, -1, 1)
    v = torch.zeros((num_heads, slen, d_in))
    torch.nn.init.uniform_(v, -1, 1)
    y_exp = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True)
    y_exp = y_exp.numpy()

    gq = ggml.from_numpy(ctx, q.numpy())
    gk = ggml.from_numpy(ctx, k.numpy())
    # ggml flash attention expects a different axis order for v:
    gv = ggml.from_numpy(ctx, v.transpose(1, 2).contiguous().numpy())
    assert ggml.shape(gv) == (num_heads, d_in, slen)

    gy = ggml.ggml_flash_attn(ctx, gq, gk, gv, True)
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    y = ggml.to_numpy(gy)
    assert np.allclose(y_exp, y)


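# Illustration (not in the original suite): scaled dot-product attention written
# out by hand, as a reference for what ggml_flash_attn is expected to compute.
# A minimal sketch using only torch ops:
@torch.no_grad()
def test_manual_sdpa_matches_torch() -> None:
    torch.random.manual_seed(0)
    q, k, v = torch.randn(3, 2, 5, 4).unbind(0)  # (num_heads, slen, d_in) each
    scores = q @ k.transpose(-1, -2) / (q.shape[-1] ** 0.5)
    causal_mask = torch.full((5, 5), float("-inf")).triu(diagonal=1)
    y = torch.softmax(scores + causal_mask, dim=-1) @ v
    y_exp = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True)
    assert torch.allclose(y, y_exp, atol=1e-6)

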
def test_ggml_softmax_vs_torch(ctx: Ctx) -> None:
    x = torch.empty((5, 8, 4))
    torch.nn.init.uniform_(x, -1, 1)
    y_exp = torch.softmax(x, dim=-1).numpy()

    gx = ggml.from_numpy(ctx, x.numpy())
    gy = ggml.ggml_soft_max(ctx, gx)
    y = ggml.to_numpy(gy)

    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)
    assert np.allclose(y_exp, y, rtol=1e-3)


def test_forward_ffn(ctx: Ctx, g_model: NativeObj, pt_model: Any) -> None:
    x = torch.empty((21, 1024))  # (seq_len, model_dim)
    torch.nn.init.uniform_(x, -1 / 32, 1 / 32)
    # Test FFN without LayerNorm
    y_exp = pt_model.text_encoder.layers[0].ffn(x).numpy()

    gx = ggml.from_numpy(ctx, x)
    gy = ggml.forward(
        "StandardFeedForwardNetwork", g_model, "text_encoder.layers.0.ffn", gx
    )
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    y = ggml.to_numpy(gf.nodes[gf.n_nodes - 1])
    assert np.allclose(y_exp, y, rtol=2e-2, atol=1e-4)


def test_forward_layer_norm(ctx: Ctx, g_model: NativeObj, pt_model: Any) -> None:
    x = torch.empty((21, 1024))
    torch.nn.init.uniform_(x, -1, 1)
    y_exp = pt_model.text_encoder.layers[0].ffn_layer_norm(x).numpy()

    gx = ggml.from_numpy(ctx, x)
    gy = ggml.forward("LayerNorm", g_model, "text_encoder.layers.0.ffn_layer_norm", gx)
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    y = ggml.to_numpy(gf.nodes[gf.n_nodes - 1])
    assert np.allclose(y_exp, y, rtol=1e-3, atol=1e-4)


def _name(tensor: ggml.ggml_tensor_p) -> bytes:
    try:
        return tensor.contents.name
    except ValueError:
        return b"???"


def test_forward_self_attn(ctx: Ctx, g_model: NativeObj, pt_model: Any) -> None:
    x = torch.empty((1, 21, 1024))
    torch.random.manual_seed(0)
    torch.nn.init.uniform_(x, -1, 1)

    self_attn = pt_model.text_encoder.layers[0].self_attn
    # Replace sdpa by just returning the queries
    # TODO: implement sdpa
    # self_attn.sdpa = lambda *qkv, **kwargs: qkv[0]

    gx = ggml.from_numpy(ctx, x[0])
    gxk = ggml.from_numpy(ctx, x[0, :11, :])
    gxv = ggml.from_numpy(ctx, x[0, :11, :])
    ggml.ggml_set_name(gx, b"x")
    gy = ggml.forward(
        "MultiheadAttention",
        g_model,
        "text_encoder.layers.0.self_attn",
        gx,
        gxk,
        gxv,
        None,
    )
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    q_exp = self_attn._project_q(x, None, None).squeeze(0).numpy()
    y = ggml.to_numpy(gy)

    nodes = {}
    for i in range(gf.n_nodes):
        name = _name(gf.nodes[i])
        children = [_name(gf.nodes[i].contents.src[j]) for j in range(2)]
        print(name, f"op({gf.nodes[i].contents.op})", children)
        nodes[name] = ggml.to_numpy(gf.nodes[i])

    attn_weights_hook = fairseq2.nn.transformer.StoreAttentionWeights([])
    self_attn.register_attn_weight_hook(attn_weights_hook)
    y_exp = self_attn(x, None, x[:, :11, :], x[:, :11, :]).numpy()
    y_exp = y_exp.squeeze(0)  # remove batch dimension

    q = nodes[b"q"]
    assert q.shape == q_exp.shape
    assert np.allclose(q_exp, q, atol=1e-5)

    attn_exp, attn_weights_exp = map(
        lambda t: t.squeeze(0).numpy(), attn_weights_hook._storage[0]
    )
    attn_weights = nodes[b"attn_weights"]
    assert attn_weights_exp.shape == attn_weights.shape
    # GGML very aggressively rounds small softmax weights down to 0.
    # Not sure what causes this.
    assert np.allclose(attn_weights_exp, attn_weights, atol=1e-3)

    attn_exp = attn_exp.transpose(0, 2, 1)
    attn = nodes[b"attn"]
    assert attn_exp.shape == attn.shape
    # Because of rounding errors in the softmax, the tolerance is even looser here.
    assert np.allclose(attn_exp, attn, atol=1e-2)

    assert y.shape == y_exp.shape
    assert np.allclose(y_exp, y, atol=1e-2)