// fairseq2.cpp
#include <math.h>
#include "ggml.h"
#include "fairseq2.h"
#include <unordered_map>
#include <algorithm>

/// allocate the fairseq2 model and hyperparameters
extern "C" fairseq2_model* fairseq2_model_alloc() {
    // pre-allocate some memory to write hyperparameters and tensor pointers
    auto* model = new fairseq2_model;
    model->hparams = new std::uint8_t[8 * 1024];
    model->arch = new std::uint64_t[16 * 1024]; // max tensors allowed
    model->tensors_ctx = nullptr;
    return model;
}

extern "C" void fairseq2_model_free(fairseq2_model* model) {
    if (model->tensors_ctx) ggml_free(model->tensors_ctx);
    // `arch` and `hparams` were allocated with `new[]`, so release them with `delete[]`.
    delete[] (std::uint64_t*)(model->arch);
    delete[] (std::uint8_t*)model->hparams;
    delete model;
}

extern "C" void fairseq2_model_set_inference_ctx(fairseq2_model* model, ggml_context* ctx) {
    model->ctx = ctx;
}

extern "C" std::string* std_string_alloc(char* c_str) {
    return new std::string(c_str);
}

extern "C" void std_string_free(std::string* str) {
    delete str;
}

bool has_layer(fairseq2_model& model, const std::string& name) {
    return model.tensors.find(name) != model.tensors.end();
}
extern "C" ggml_tensor* Linear_forward(
    fairseq2_model& model,
    const std::string& prefix,
    ggml_tensor* input // (d_in)
) {
    // Note: for now we assume un-batched input
    ggml_tensor* weight = model.tensors[prefix + ".weight"]; // (d_in, d_out)
    GGML_ASSERT(weight != nullptr);
    ggml_tensor* bias = model.tensors[prefix + ".bias"]; // (d_out)
    GGML_ASSERT(bias != nullptr);

    return ggml_add(
        model.ctx,
        ggml_mul_mat(model.ctx, weight, input), // (d_out)
        bias
    );
}
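
// Illustrative note: `ggml_mul_mat(ctx, a, b)` contracts over the first
// (innermost) dimension of both tensors, so with weight ne = (d_in, d_out)
// and input ne = (d_in), the result has ne = (d_out), i.e. y = W x + b.
// For instance, a hypothetical projection with d_in = 4 and d_out = 2:
//
//   ggml_tensor* w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 2);
//   ggml_tensor* x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
//   ggml_tensor* y = ggml_mul_mat(ctx, w, x); // y->ne[0] == 2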
extern "C" ggml_tensor* LayerNorm_forward(
    fairseq2_model& model,
    const std::string& prefix,
    ggml_tensor* input
) {
    ggml_tensor* weight = model.tensors[prefix + ".weight"];
    GGML_ASSERT(weight != nullptr);
    ggml_tensor* bias = model.tensors[prefix + ".bias"];
    GGML_ASSERT(bias != nullptr);

    auto ctx = model.ctx;
    // TODO: should `eps` be part of the unity hparams?
    input = ggml_norm(ctx, input, /*eps*/1e-5);
    return ggml_add(
        ctx,
        ggml_mul(ctx, ggml_repeat(ctx, weight, input), input),
        ggml_repeat(ctx, bias, input)
    );
}
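
// For reference, this computes the usual layer norm over the model dimension:
//
//   y = (x - mean(x)) / sqrt(var(x) + eps) * weight + bias
//
// `ggml_norm` performs the normalization itself; the affine part uses
// `ggml_repeat` to broadcast `weight` and `bias` (shape (d)) up to the shape
// of `input`, since the elementwise ops here expect operands of equal shape.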
extern "C" ggml_tensor* StandardFeedForwardNetwork_forward(
    fairseq2_model& model,
    const std::string& prefix,
    ggml_tensor* seqs
) {
    seqs = Linear_forward(model, prefix + ".inner_proj", seqs);
    // inner_activation = ReLU // TODO: allow other activations
    seqs = ggml_relu(model.ctx, seqs);

    if (has_layer(model, prefix + ".inner_layer_norm")) {
        seqs = LayerNorm_forward(model, prefix + ".inner_layer_norm", seqs);
    }

    seqs = Linear_forward(model, prefix + ".output_proj", seqs);
    return seqs;
}
ggml_tensor* reshape_num_head(ggml_context* ctx, ggml_tensor* x, int num_heads) {
    int slen = x->ne[1];
    int model_dim = x->ne[0];
    // (S, dim) -> (S, H, H_dim)
    x = ggml_reshape_3d(ctx, x, model_dim / num_heads, num_heads, slen);
    // (S, H, H_dim) -> (H, S, H_dim)
    x = ggml_permute(ctx, x, 0, 2, 1, 3);
    return x;
}
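
// Illustrative example: with model_dim = 1024, num_heads = 16 and S = 7,
// `x` comes in with ne = (1024, 7). The reshape yields ne = (64, 16, 7)
// (16 heads of dimension 64 per position), and the permute moves the head
// axis outermost, ne = (64, 7, 16), so attention can then be computed
// independently per head.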
#define UNITY_FLASH_ATTN

extern "C" ggml_tensor* MultiheadAttention_forward(
    fairseq2_model& model,
    const std::string& prefix,
    ggml_tensor* queries, // (slen, d_in)
    ggml_tensor* keys,    // (klen, d_in)
    ggml_tensor* values,  // (klen, d_out)
    ggml_tensor* mask     // (klen, slen)
) {
    int slen = queries->ne[1];
    int slenk = keys->ne[1];
    int num_heads = 16; // TODO: read this from the model hparams instead of hardcoding it
    int head_dim = queries->ne[0] / num_heads;
    ggml_context* ctx = model.ctx;

    ggml_tensor* q = Linear_forward(model, prefix + ".q_proj", queries);
    q = reshape_num_head(ctx, q, num_heads); // (H, S, H_dim)
    ggml_set_name(q, "q");
    ggml_tensor* k = Linear_forward(model, prefix + ".k_proj", keys);
    k = reshape_num_head(ctx, k, num_heads); // (H, Sk, H_dim)
    ggml_set_name(k, "k");

    ggml_tensor* v = Linear_forward(model, prefix + ".v_proj", values);
    v = ggml_reshape_3d(ctx, v, head_dim, num_heads, slenk); // (Sk, H, H_dim)
    v = ggml_permute(ctx, v, 1, 2, 0, 3); // (H, H_dim, Sk)
    v = ggml_cont(ctx, v);
    ggml_set_name(v, "v");

#ifdef UNITY_FLASH_ATTN
    // For flash_attn, we assume either no masks, or triangular masks.
    ggml_tensor* attn = ggml_flash_attn(ctx, q, k, v, /*masked*/mask != nullptr); // (H, S, H_dim)
    ggml_set_name(attn, "attn");
    attn = ggml_permute(ctx, attn, 0, 2, 1, 3); // (S, H, H_dim)
    attn = ggml_cont(ctx, attn);
    attn = ggml_reshape_2d(ctx, attn, num_heads * head_dim, slen); // (S, H * H_dim)
#else
    // (H, Sk, H_dim) x (H, S, H_dim) -> (H, S, Sk)
    ggml_tensor* qk = ggml_mul_mat(ctx, k, q);
    ggml_set_name(qk, "qk");
    ggml_tensor* qk_scale = ggml_new_tensor_1d(ctx, qk->type, 1);
    ggml_set_f32(qk_scale, 1.0f / sqrtf(float(head_dim)));
    qk = ggml_scale(ctx, qk, qk_scale);
    ggml_set_name(qk, "qk_scaled");

    if (mask) qk = ggml_add(ctx, qk, mask);
    // TODO: upgrade qk to float32 if needed
    ggml_tensor* attn_weights = ggml_soft_max(ctx, qk); // (H, Sk, S)
    ggml_set_name(attn_weights, "attn_weights");

    // (H, S, Sk) x (H, H_dim, Sk) -> (H, H_dim, S)
    ggml_tensor* attn = ggml_mul_mat(ctx, attn_weights, v);
    ggml_set_name(attn, "attn");
    attn = ggml_reshape_2d(ctx, attn, slen, num_heads * head_dim); // (H * H_dim, S)
    attn = ggml_transpose(ctx, attn); // (S, H * H_dim)
    // `ggml_transpose` returns a non-contiguous view, so make the tensor
    // contiguous before the output projection.
    attn = ggml_cont(ctx, attn);
#endif // UNITY_FLASH_ATTN

    // out -> (S, d_out)
    ggml_tensor* out = Linear_forward(model, prefix + ".output_proj", attn);
    ggml_set_name(out, "out");
    return out;
}
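
// Both branches above compute scaled dot-product attention per head,
//
//   Attn(Q, K, V) = softmax(Q K^T / sqrt(H_dim)) V
//
// The UNITY_FLASH_ATTN path fuses the softmax with both matrix products
// (and only supports the no-mask / triangular-mask cases), while the
// fallback path materializes the full (H, S, Sk) attention-weight tensor.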
extern "C" ggml_tensor* StandardTransformerEncoderLayer_forward(
    fairseq2_model& model,
    const std::string& prefix,
    ggml_tensor* seqs,
    ggml_tensor* padding_mask
) {
    ggml_context* ctx = model.ctx;
    // TODO: read norm_order from model
    auto norm_order = TRANSFORMER_NORM_ORDER_PRE;

    // _forward_self_attn(seqs, padding_mask)
    auto residual = seqs;
    if (norm_order != TRANSFORMER_NORM_ORDER_POST)
        seqs = LayerNorm_forward(model, prefix + ".self_attn_layer_norm", seqs);

    // TODO: add padding_mask to MultiheadAttention_forward
    GGML_ASSERT(padding_mask == nullptr);
    seqs = MultiheadAttention_forward(
        model,
        prefix + ".self_attn",
        seqs,
        seqs,
        seqs,
        /*attention masks=*/nullptr
    );

    if (has_layer(model, prefix + ".self_attn_norm"))
        seqs = LayerNorm_forward(model, prefix + ".self_attn_norm", seqs);

    seqs = ggml_add(ctx, seqs, residual);

    if (norm_order == TRANSFORMER_NORM_ORDER_POST)
        seqs = LayerNorm_forward(model, prefix + ".self_attn_layer_norm", seqs);

    // _forward_ffn(seqs)
    residual = seqs;

    if (norm_order != TRANSFORMER_NORM_ORDER_POST)
        seqs = LayerNorm_forward(model, prefix + ".ffn_layer_norm", seqs);

    seqs = StandardFeedForwardNetwork_forward(model, prefix + ".ffn", seqs);

    // TODO: if self.residual_scale is not None:
    //     residual = self.residual_scale * residual
    seqs = ggml_add(ctx, seqs, residual);

    if (norm_order == TRANSFORMER_NORM_ORDER_POST)
        seqs = LayerNorm_forward(model, prefix + ".ffn_layer_norm", seqs);

    return seqs;
}
struct ggml_tensor* ggml_slice(
    struct ggml_context* ctx,
    struct ggml_tensor* a,
    int axis,
    int64_t start,
    int64_t end
) {
    int64_t ne[4];
    std::copy(a->ne, a->ne + 4, ne);
    if (start < 0) start = ne[axis] + start;
    if (end < 0) end = ne[axis] + end;
    GGML_ASSERT(0 <= start);
    GGML_ASSERT(start <= end);
    GGML_ASSERT(end <= ne[axis]);

    ne[axis] = end - start;
    size_t offset = a->nb[axis] * start;

    size_t* nb = a->nb;
    ggml_tensor* result = ggml_view_4d(ctx, a, ne[0], ne[1], ne[2], ne[3], nb[1], nb[2], nb[3], offset);
    result->n_dims = a->n_dims;
    return result;
}
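
// Illustrative usage: negative indices count from the end of the axis, like
// Python slicing. For a tensor `t` with ne = (10, 5):
//
//   ggml_slice(ctx, t, /*axis*/0, 2, -1); // keeps elements 2..8 -> ne = (7, 5)
//   ggml_slice(ctx, t, /*axis*/1, 0, 3);  // keeps rows 0..2     -> ne = (10, 3)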
extern "C" ggml_tensor* PositionalEmbedding_forward(
    fairseq2_model& model,
    const std::string& prefix,
    ggml_tensor* embeds
) {
    // This only works with the simple pos encoders
    int encoding_dim = embeds->ne[0];
    int seq_len = embeds->ne[1];
    ggml_tensor* full_pos_embeds = model.tensors[prefix];

    ggml_tensor* pos_embeds = ggml_slice(model.ctx, full_pos_embeds, /*axis*/1, 0, seq_len);
    return ggml_add(model.ctx, embeds, pos_embeds);
}
extern "C" ggml_tensor* TransformerEmbeddingFrontend_forward(
    fairseq2_model& model,
    const std::string& prefix,
    ggml_tensor* seqs
    // TODO: state_bag
) {
    ggml_context* ctx = model.ctx;
    ggml_tensor* embed_weights = model.tensors[prefix + ".embed.weight"];
    GGML_ASSERT(embed_weights != nullptr);
    ggml_tensor* embeds = ggml_get_rows(ctx, embed_weights, seqs);

    // padding mask?
    // padding_mask = to_padding_mask(embeds, seq_lens)

    if (has_layer(model, prefix + ".pos_encoder")) {
        embeds = PositionalEmbedding_forward(model, prefix + ".pos_encoder", embeds);
    }

    if (has_layer(model, prefix + ".layer_norm")) {
        embeds = LayerNorm_forward(model, prefix + ".layer_norm", embeds);
    }

    return embeds;
}
extern "C" ggml_tensor* StandardTransformerEncoder_forward(
    fairseq2_model& model,
    const std::string& prefix,
    ggml_tensor* seqs,
    ggml_tensor* padding_mask
) {
    int layer_idx = 0;
    std::string layer_name = prefix + ".layers." + std::to_string(layer_idx);
    while (has_layer(model, layer_name)) {
        seqs = StandardTransformerEncoderLayer_forward(
            model, layer_name, seqs, padding_mask
        );
        ggml_set_name(seqs, ("x_enc_" + std::to_string(layer_idx)).c_str());
        layer_idx += 1;
        layer_name = prefix + ".layers." + std::to_string(layer_idx);
    }

    if (has_layer(model, prefix + ".layer_norm"))
        seqs = LayerNorm_forward(model, prefix + ".layer_norm", seqs);

    return seqs;
}
extern "C" ggml_tensor* StandardTransformerDecoderLayer_forward(
    fairseq2_model& model,
    const std::string& prefix,
    ggml_tensor* seqs,
    ggml_tensor* self_attn_mask,
    ggml_tensor* encoder_output,
    ggml_tensor* encoder_padding_mask
) {
    ggml_context* ctx = model.ctx;
    // TODO: read norm_order from model
    auto norm_order = TRANSFORMER_NORM_ORDER_PRE;

    // _forward_self_attn(seqs, padding_mask)
    auto residual = seqs;
    if (norm_order != TRANSFORMER_NORM_ORDER_POST)
        seqs = LayerNorm_forward(model, prefix + ".self_attn_layer_norm", seqs);

    seqs = MultiheadAttention_forward(
        model,
        prefix + ".self_attn",
        seqs,
        seqs,
        seqs,
        /*attention masks=*/self_attn_mask
    );

    if (has_layer(model, prefix + ".self_attn_norm"))
        seqs = LayerNorm_forward(model, prefix + ".self_attn_norm", seqs);

    seqs = ggml_add(ctx, seqs, residual);

    if (norm_order == TRANSFORMER_NORM_ORDER_POST)
        seqs = LayerNorm_forward(model, prefix + ".self_attn_layer_norm", seqs);

    // _forward_encoder_decoder_attn
    if (!has_layer(model, prefix + ".encoder_decoder_attn")) {
        // `encoder_output` must be `None` for decoder-only attention.
        GGML_ASSERT(encoder_output == nullptr);
        return seqs;
    }

    // `encoder_output` must not be `None` for encoder-decoder attention.
    GGML_ASSERT(encoder_output != nullptr);

    residual = seqs;

    if (norm_order != TRANSFORMER_NORM_ORDER_POST)
        seqs = LayerNorm_forward(model, prefix + ".encoder_decoder_attn_layer_norm", seqs);

    seqs = MultiheadAttention_forward(
        model,
        prefix + ".encoder_decoder_attn",
        seqs,
        encoder_output,
        encoder_output,
        /*attention masks=*/encoder_padding_mask
    );

    seqs = ggml_add(ctx, seqs, residual);

    if (norm_order == TRANSFORMER_NORM_ORDER_POST)
        seqs = LayerNorm_forward(model, prefix + ".encoder_decoder_attn_layer_norm", seqs);

    // _forward_ffn(seqs)
    residual = seqs;

    if (norm_order != TRANSFORMER_NORM_ORDER_POST)
        seqs = LayerNorm_forward(model, prefix + ".ffn_layer_norm", seqs);

    seqs = StandardFeedForwardNetwork_forward(model, prefix + ".ffn", seqs);

    // TODO:
    // if self.residual_scale is not None:
    //     residual = self.residual_scale * residual
    seqs = ggml_add(ctx, seqs, residual);

    if (norm_order == TRANSFORMER_NORM_ORDER_POST)
        seqs = LayerNorm_forward(model, prefix + ".ffn_layer_norm", seqs);

    return seqs;
}
ggml_tensor* causal_mask_cache = nullptr;

extern "C" ggml_tensor* causal_attention_mask(ggml_context* ctx, ggml_tensor* seqs) {
    auto seq_len = seqs->ne[0];
    auto mask = causal_mask_cache;
    // TODO: this cache only works as long as we don't change the size/device too often
    // TODO: allow other ggml_type
    if (mask == nullptr || mask->backend != seqs->backend || mask->ne[0] < seq_len) {
        printf("new causal_mask (%ld, %ld) created\n", seq_len, seq_len);
        mask = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, seq_len, seq_len);
        char* data = (char*)mask->data;

        // tensor([[0., -inf, -inf, -inf],
        //         [0.,   0., -inf, -inf],
        //         [0.,   0.,   0., -inf],
        //         [0.,   0.,   0.,   0.]])
        for (int i = 0; i < seq_len; ++i) {
            char* row = data + i * mask->nb[1];
            for (int j = 0; j <= i; ++j) { *(float*)(row + j * mask->nb[0]) = 0; }
            for (int j = i + 1; j < seq_len; ++j) { *(float*)(row + j * mask->nb[0]) = -INFINITY; }
        }

        causal_mask_cache = mask;
    }

    return ggml_view_2d(ctx, mask, seq_len, seq_len, mask->nb[1], 0);
}
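
// The mask is consumed by MultiheadAttention_forward: in the non-flash path
// it is added to the raw attention scores before the softmax, where a 0 entry
// leaves the score unchanged and a -inf entry forces the corresponding weight
// to zero, so position i can only attend to positions j <= i. The flash path
// assumes an equivalent triangular mask whenever a mask is passed.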
extern "C" ggml_tensor* StandardTransformerDecoder_forward(
    fairseq2_model& model,
    const std::string& prefix,
    ggml_tensor* seqs,
    ggml_tensor* padding_mask,
    ggml_tensor* encoder_output,
    ggml_tensor* encoder_padding_mask
) {
    int layer_idx = 0;
    std::string layer_name = prefix + ".layers." + std::to_string(layer_idx);
    ggml_tensor* self_attn_mask = causal_attention_mask(model.ctx, seqs);
    while (has_layer(model, layer_name)) {
        seqs = StandardTransformerDecoderLayer_forward(
            model, layer_name, seqs, self_attn_mask, encoder_output, encoder_padding_mask
        );
        ggml_set_name(seqs, ("x_dec_" + std::to_string(layer_idx)).c_str());
        layer_idx += 1;
        layer_name = prefix + ".layers." + std::to_string(layer_idx);
    }

    if (has_layer(model, prefix + ".layer_norm"))
        seqs = LayerNorm_forward(model, prefix + ".layer_norm", seqs);

    return seqs;
}
using IncrementalStateBag = std::unordered_map<ggml_tensor*, ggml_tensor*>*;

int _determine_max_seq_len(const SequenceGeneratorJob& job, int source_seq_len) {
    auto opts = job.opts;
    int max_seq_len = -1;
    if (source_seq_len <= 0 || opts.soft_max_seq_len_a <= 0) {
        max_seq_len = opts.hard_max_seq_len;
    } else {
        max_seq_len = std::min(opts.hard_max_seq_len, int(opts.soft_max_seq_len_a * source_seq_len + opts.soft_max_seq_len_b));
    }

    if (opts.min_seq_len > max_seq_len) {
        printf(
            "The effective maximum sequence length must be greater than or equal to `min_seq_len` (%d), but is %d instead. Adjust your soft and hard maximum sequence length limits.\n",
            opts.min_seq_len,
            max_seq_len
        );
        GGML_ASSERT(opts.min_seq_len <= max_seq_len);
    }

    int prefix_seq_len = job.prefix_seq->ne[0];
    if (prefix_seq_len >= max_seq_len) {
        printf(
            "The effective maximum sequence length must be greater than `prefix_seq_len` (%d), but is %d instead.\n",
            prefix_seq_len,
            max_seq_len
        );
        GGML_ASSERT(prefix_seq_len < max_seq_len);
    }

    return max_seq_len;
}
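
// Worked example (hypothetical values): with soft_max_seq_len_a = 1,
// soft_max_seq_len_b = 200, hard_max_seq_len = 1024 and source_seq_len = 50,
// the soft limit is 1 * 50 + 200 = 250, so max_seq_len = min(1024, 250) = 250.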
void _fan_out_encoder_output(
    ggml_context* ctx,
    ggml_tensor** encoder_output_out,
    ggml_tensor** encoder_padding_mask_out,
    int beam_size
) {
    // (S_enc, M)
    ggml_tensor* encoder_output = *encoder_output_out;
    ggml_tensor* encoder_padding_mask = *encoder_padding_mask_out;

    // (S_enc, M) -> (B, S_enc, M)
    ggml_tensor* shape = ggml_new_tensor_3d(ctx, GGML_TYPE_I8, encoder_output->ne[0], encoder_output->ne[1], beam_size);
    *encoder_output_out = ggml_repeat(ctx, encoder_output, shape);

    // (S_enc) -> (B, S_enc)
    // Only fan out the padding mask if there is one: don't dereference a null tensor.
    if (encoder_padding_mask != nullptr) {
        ggml_tensor* shape_mask = ggml_new_tensor_2d(ctx, GGML_TYPE_I8, encoder_padding_mask->ne[0], beam_size);
        *encoder_padding_mask_out = ggml_repeat(ctx, encoder_padding_mask, shape_mask);
    }
}
ggml_tensor* ggml_log_softmax(ggml_context* ctx, ggml_tensor* logits) {
    // TODO: this isn't the smartest way of doing this
    return ggml_log(ctx, ggml_soft_max(ctx, logits));
}
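
// Note: log(softmax(x)) underflows to log(0) = -inf for strongly negative
// logits. A numerically stable alternative (a sketch, not wired in yet) is
// the usual shifted form:
//
//   log_softmax(x_i) = (x_i - m) - log(sum_j exp(x_j - m)), with m = max_j x_j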
void _bootstrap_seqs_and_scores(
    fairseq2_model& model,
    const SequenceGeneratorJob& job,
    ggml_tensor* seqs,
    ggml_tensor* scores,
    ggml_tensor* encoder_output,
    ggml_tensor* encoder_padding_mask,
    IncrementalStateBag state_bag
) {
    int prefix_seq_len = job.prefix_seq->ne[0];
    int max_seq_len = scores->ne[0];
    int beam_size = scores->ne[1];
    GGML_ASSERT(prefix_seq_len > 0);
    if (prefix_seq_len == 1)
        return;

    ggml_context* ctx = model.ctx;

    // seqs[:, : prefix_seq_len] = job.prefix_seq
    // Done eagerly: a `ggml_cpy` node would only run as part of a computed
    // graph, and it cannot broadcast the prefix across beams.
    for (int i = 0; i < prefix_seq_len; ++i) {
        std::int32_t tok = ggml_get_i32_1d(job.prefix_seq, i);
        for (int b = 0; b < beam_size; ++b)
            ggml_set_i32_1d(seqs, b * max_seq_len + i, tok);
    }

    // We have to bootstrap the model with the already fanned-out encoder
    // output to correctly initialize its incremental state. This causes some
    // redundancy as we have to expand `decoder_input` to match the shape of
    // `encoder_output`.
    // (S_pfx) -> (B, S_pfx - 1)
    // prefix_seq[:-1].expand(beam_size, -1)
    ggml_tensor* decoder_input = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, prefix_seq_len - 1, beam_size);
    for (int b = 0; b < beam_size; ++b)
        for (int i = 0; i < prefix_seq_len - 1; ++i)
            ggml_set_i32_1d(decoder_input, b * (prefix_seq_len - 1) + i, ggml_get_i32_1d(job.prefix_seq, i));

    // Bootstrap the model state with the prefix sequence.
    decoder_input = TransformerEmbeddingFrontend_forward(model, "text_decoder_frontend", decoder_input);
    ggml_tensor* decoder_output = StandardTransformerDecoder_forward(
        model,
        "text_decoder",
        decoder_input,
        /*padding_mask*/ nullptr,
        encoder_output,
        encoder_padding_mask
        // TODO: state_bag
    );
    // TODO: state_bag.increment_step(prefix_seq_len - 1)

    // logits, lprobs: (N, S_pfx - 1, V); keep only the first beam, they are all identical.
    ggml_tensor* logits = Linear_forward(model, "final_proj", decoder_output);
    ggml_tensor* lprobs = ggml_log_softmax(ctx, ggml_view_3d(ctx, logits, logits->ne[0], logits->ne[1], 1, logits->nb[1], logits->nb[2], 0));
    int vocab_size = logits->ne[0];
    ggml_cgraph gf = ggml_build_forward(lprobs);
    ggml_graph_compute_with_ctx(ctx, &gf, 1);

    // Fetch scores of the prefix steps from "lprobs": the score of prefix
    // token i + 1 is the log-prob the model assigns to it after step i.
    // Note: the first step (e.g. BOS)'s score is always 0.
    float p_score = 0;
    for (int i = 0; i < prefix_seq_len - 1; ++i) {
        int p = ggml_get_i32_1d(job.prefix_seq, i + 1);
        p_score += ggml_get_f32_1d(lprobs, i * vocab_size + p);
        for (int b = 0; b < beam_size; ++b) {
            // scores: (N, S)
            ggml_set_f32_1d(scores, b * max_seq_len + i + 1, p_score);
        }
    }
}
/// Represents a hypothesis produced by a sequence generator.
struct Hypothesis {
    /// The generated sequence.
    ggml_tensor* seq;

    /// The score of the hypothesis.
    float score;

    /// The score of each individual sequence step.
    ggml_tensor* step_scores;
};
/// Represents a standard beam search algorithm.
int StandardBeamSearch_step(
    ggml_context* ctx,
    int step_nr,
    bool is_start_step,
    ggml_tensor* lprobs, // (N, S, V)
    ggml_tensor* scores, // (N, S)
    ggml_tensor* candidate_indices
) {
    int vocab_size = lprobs->ne[0];
    int sent_len = lprobs->ne[1];
    int beam_size = lprobs->ne[2];
    GGML_ASSERT(scores->ne[0] >= step_nr + 1);
    GGML_ASSERT(scores->ne[1] == beam_size);

    // should this be done by the caller?
    // scores[:, step_nr] -> one score per beam, strided over the beam axis.
    ggml_tensor* last_scores = ggml_view_2d(ctx, scores, 1, beam_size, scores->nb[1], step_nr * scores->nb[0]);

    if (is_start_step) {
        // At the initial step, all hypotheses are equally likely, so we use
        // only the first beam.
        lprobs = ggml_view_3d(ctx, lprobs, vocab_size, sent_len, 1, lprobs->nb[1], lprobs->nb[2], 0);
        lprobs = ggml_cont(ctx, lprobs);
        // The first step always indicates the beginning of the sequence and
        // has no score.
        if (step_nr > 0) {
            lprobs = ggml_add(ctx, lprobs, last_scores);
        }
    } else {
        // Make probabilities contain cumulative scores for each hypothesis.
        lprobs = ggml_add(ctx, lprobs, last_scores);
    }

    ggml_cgraph gf = ggml_build_forward(lprobs);
    ggml_graph_compute_with_ctx(ctx, &gf, 1);

    // Take the best 2 x `beam_size` predictions. We'll choose the first
    // `beam_size` of these which don't predict EOS to continue with.
    // (N, 2 x B)
    // `vocab_size` - 1 to never select PAD.
    int topk = std::min(2 * beam_size, vocab_size - 1);

    // Sort candidates by *descending* cumulative log-prob: the best
    // predictions must come first.
    auto comp = [lprobs](std::int32_t a, std::int32_t b) {
        return ggml_get_f32_1d(lprobs, a) > ggml_get_f32_1d(lprobs, b);
    };
    auto cand = (std::int32_t*)candidate_indices->data;
    std::partial_sort(cand, cand + topk, cand + (beam_size * vocab_size), comp);

    return topk;
}
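
// Each entry of `candidate_indices` flattens a (beam, token) pair into a
// single integer, decoded in `_finalize_hypothesis` below as:
//
//   beam  = candidate / vocab_size;
//   token = candidate % vocab_size;
//
// e.g. with vocab_size = 32000, candidate 64013 refers to beam 2, token 13.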
bool _finalize_hypothesis(
    const SequenceGeneratorJob& job,
    ggml_context* ctx,
    int step_nr,
    std::int32_t candidate,
    ggml_tensor* lprobs, // (beam_size, vocab_size) log-probs of the current step
    ggml_tensor* seqs,   // (beam_size, seq_len)
    ggml_tensor* scores, // (beam_size, seq_len)
    std::vector<Hypothesis>& hypotheses
) {
    int vocab_size = lprobs->ne[0];
    std::int32_t beam = candidate / vocab_size;
    std::int32_t token = candidate % vocab_size;

    // Cumulative score of the finished hypothesis: score so far plus the
    // log-prob of the EOS candidate itself.
    float tok_score = ggml_get_f32_1d(lprobs, candidate) + ggml_get_f32_1d(scores, scores->ne[0] * beam + step_nr);

    // Detect beams that reached the minimum length and that end with an EOS.
    bool eos = token == job.eos_idx;
    eos &= tok_score != -INFINITY;
    // TODO ignored_beam_mask ?
    // eos &= ggml_get_i32_1d(ignored_beam_mask, beam);
    // ggml_set_i32_1d(eos_mask, beam, eos);

    if (!eos) return false;

    // If the candidate beam is "finished", let's copy the score and sequence
    ggml_tensor* tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, step_nr + 2);
    ggml_tensor* step_scores = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, step_nr + 2);
    auto tok = (std::int32_t*)tokens->data;
    auto sc = (float*)step_scores->data;
    ggml_set_f32_1d(scores, scores->ne[0] * beam + step_nr + 1, tok_score);

    for (int i = 0; i < step_nr + 1; ++i) {
        tok[i] = ggml_get_i32_1d(seqs, seqs->ne[0] * beam + i);
    }
    tok[step_nr + 1] = token;

    // Convert from cumulative to per-step scores; step_scores[i] is the
    // score of token i, and the first step (e.g. BOS) always scores 0.
    float last_score = tok_score;
    for (int i = step_nr; i >= 0; --i) {
        float sc0 = ggml_get_f32_1d(scores, scores->ne[0] * beam + i);
        sc[i + 1] = last_score - sc0;
        last_score = sc0;
    }
    sc[0] = 0.0f;

    // Skip first EOS since it is always 0 and skews normalization.
    if (job.opts.normalize_scores)
        tok_score /= std::pow((step_nr + 1), job.opts.len_penalty);

    hypotheses.emplace_back(Hypothesis{tokens, tok_score, step_scores});
    return true;
}
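
// Illustration of the cumulative-to-per-step conversion (made-up numbers):
// with cumulative scores [0.0, -1.2, -1.9] for steps 0..2 and an EOS
// candidate whose cumulative score is -2.5, the per-step scores come out as
// [0.0, -1.2, -0.7, -0.6], which sums back to -2.5.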
/// Generates a translation for a single sequence.
// TODO: finish this for beam_size=1
// * implement the lprobs tweaking
// TODO: add IncrementalStateBag support to avoid a O(N^3) generation.
// TODO: support beam_size > 1:
// * most layers assume un-batched input, but we want to handle several beams at once
// * need to port "reorder_state_dict"
// * once beams are selected with topk, we need to update the seqs and scores tensors
extern "C" float generate_sequence(
    fairseq2_model& model,
    const SequenceGeneratorJob& job,
    ggml_tensor* encoder_output,
    ggml_tensor* encoder_padding_mask,
    ggml_tensor* output_seq
) {
    // The vocabulary size is the output dimension of the final projection;
    // `encoder_output->ne[0]` is the model dimension, not the vocabulary.
    ggml_tensor* final_proj_weight = model.tensors["final_proj.weight"];
    GGML_ASSERT(final_proj_weight != nullptr);
    int vocab_size = final_proj_weight->ne[1];
    int beam_size = job.opts.beam_size;
    int source_seq_len = encoder_output->ne[1];
    int max_seq_len = _determine_max_seq_len(job, source_seq_len);
    ggml_context* ctx = model.ctx;

    // (S_enc, M) -> (B, S_enc, M)
    _fan_out_encoder_output(ctx, &encoder_output, &encoder_padding_mask, beam_size);

    // Start empty: hypotheses are appended as they finish, and the search
    // stops once `beam_size` of them have been collected.
    std::vector<Hypothesis> finished_searches;
    finished_searches.reserve(beam_size);

    // Initialize buffers. (B, S)
    ggml_tensor* seqs = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, max_seq_len, beam_size);
    ggml_set_i32(seqs, 0);
    ggml_tensor* scores = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, max_seq_len, beam_size);
    ggml_set_f32(scores, 0.0);

    IncrementalStateBag state_bag = {};
    _bootstrap_seqs_and_scores(
        model, job, seqs, scores, encoder_output, encoder_padding_mask, state_bag
    );
    int prefix_seq_len = job.prefix_seq->ne[0];
    int start_step = prefix_seq_len - 1;

    // Holds the indices of beams (a beam can occur more than once) that we
    // should continue with in the next step.
    ggml_tensor* beam_indices = nullptr;

    // Indices of the next-token candidates.
    ggml_tensor* candidate_indices = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, vocab_size * beam_size);
    for (int i = 0; i < vocab_size * beam_size; ++i) ggml_set_i32_1d(candidate_indices, i, i);

    // Holds the indices of searches that we should continue with in the next
    // step. If not `None`, it means we finalized one or more searches in the
    // last step.
    ggml_tensor* search_indices = nullptr;

    for (int step_nr = start_step; step_nr < max_seq_len - 1; ++step_nr) {
        // if (beam_indices != nullptr) {
        //     // If not `None`, it means in the last step we finalized one or
        //     // more searches. We should ensure that we adjust `beam_indices`
        //     // before reordering `decoder`'s incremental state.
        //     if (search_indices != nullptr) {
        //         num_searches = search_indices->ne[0];
        //         // (N)
        //         delta = search_indices - torch.arange(num_searches, device=device)
        //         // (N) -> (N, 1)
        //         delta.unsqueeze_(-1)
        //         // Adjust indices to take into account removed searches.
        //         beam_indices.view(num_searches, beam_size).add_(delta * beam_size)
        //     }
        //     // state_bag.reorder(beam_indices)
        // }
        // Embed the tokens generated so far, then decode. Keep `seqs` itself
        // untouched: it stays the (B, S) token buffer for the next steps.
        // decoder_input = seqs[:, : step_nr + 1]
        ggml_tensor* decoder_input = ggml_slice(ctx, seqs, /*axis*/0, 0, step_nr + 1);
        decoder_input = TransformerEmbeddingFrontend_forward(model, "text_decoder_frontend", decoder_input);
        ggml_tensor* decoder_output = StandardTransformerDecoder_forward(
            model,
            "text_decoder",
            decoder_input,
            nullptr, // We never generate PAD.
            encoder_output,
            encoder_padding_mask
            // state_bag=state_bag,
        );
        // state_bag.increment_step()

        // Without a state bag we re-decode the whole prefix every step, hence
        // the O(N^3) generation noted in the TODO above. Only the last
        // position's logits are needed for the next-token distribution.
        decoder_output = ggml_slice(ctx, decoder_output, /*axis*/1, step_nr, step_nr + 1);
        ggml_tensor* logits = Linear_forward(model, "final_proj", decoder_output);
        ggml_tensor* lprobs = ggml_log_softmax(ctx, logits);
        // // Do not allow EOS before reaching the minimum sequence length.
        // if step_nr < self.opts.min_seq_len:
        //     lprobs[:, :, self.eos_idx] = -torch.inf
        // // If we have reached the maximum length, force the last step to be EOS.
        // if step_nr == max_seq_len - 2:
        //     lprobs[:, :, : self.eos_idx]   = -torch.inf
        //     lprobs[:, :, self.eos_idx + 1 :] = -torch.inf
        // // Never allow PAD.
        // lprobs[:, :, self.pad_idx] = -torch.inf
        // // Apply UNK penalty.
        // if self.unk_idx is not None:
        //     lprobs[:, :, self.unk_idx] -= self.opts.unk_penalty
        // Determine candidates for the next step.
        // (N, 2 x B)
        int topk = StandardBeamSearch_step(
            ctx,
            step_nr,
            step_nr == start_step,
            lprobs,
            // TODO: only pass scores for new tokens
            ggml_view_2d(ctx, scores, step_nr + 1, beam_size, scores->nb[1], 0),
            candidate_indices
        );

        int ongoing_beams = 0;
        for (std::int32_t c = 0; c < topk; ++c) {
            // `candidate_indices` was partially sorted above, so entry `c`
            // is the c-th best (beam, token) candidate.
            std::int32_t candidate = ggml_get_i32_1d(candidate_indices, c);
            bool finished = _finalize_hypothesis(job, ctx, step_nr, candidate, lprobs, seqs, scores, finished_searches);
            if (!finished) ongoing_beams += 1;
            if (ongoing_beams >= beam_size) break;
        }
        if ((int)finished_searches.size() >= beam_size) break;
        // TODO: recreate scores and seqs with the best beams
        // Remove finished searches (ones for which `beam_size` finalized
        // beams have been generated) from the batch.
        ggml_tensor* search_indices = nullptr;
        // if (newly_finished_searches) {
        //     new_num_searches = num_searches - len(newly_finished_searches)
        //     // Construct `search_indices` which holds indices of searches
        //     // to keep for the next step.
        //     search_mask = torch.full((num_searches,), True, device=device)
        //     search_mask[newly_finished_searches] = False
        //     search_indices = torch.arange(num_searches, device=device)
        //     search_indices = search_indices.masked_select(search_mask)
        //     // Filter out removed batches from state variables.
        //     // (N, B) -> (N - F, B)
        //     ignored_beam_mask = ignored_beam_mask[search_indices]
        //     // (N, 2 x B) -> (N - F, 2 x B)
        //     cand_scores       = cand_scores      [search_indices]
        //     cand_indices      = cand_indices     [search_indices]
        //     cand_beam_indices = cand_beam_indices[search_indices]
        //     // (N) -> (N - F)
        //     search_offsets.resize_(new_num_searches, 1)
        //     // (N - F, 2 x B) + (N - F) -> (N - F, 2 x B)
        //     global_cand_beam_indices = cand_beam_indices + search_offsets
        //     // (N, 2 x B) -> (N - F, 2 x B)
        //     eos_mask = eos_mask[search_indices]
        //     // (N x B, S) -> (N, B, S)
        //     seqs   = seqs  .view(num_searches, -1)
        //     scores = scores.view(num_searches, -1)
        //     // (N, B, S + 1) -> ((N - F) x B, S)
        //     seqs   = seqs  [search_indices].view(new_num_searches * beam_size, -1)
        //     scores = scores[search_indices].view(new_num_searches * beam_size, -1)
        //     // (N x B, S_enc, M) -> (N, B, S_enc, M)
        //     encoder_output = encoder_output.unflatten(0, (num_searches, -1))
        //     // (N, B, S_enc, M) -> ((N - F) x B, S_enc, M)
        //     encoder_output = encoder_output[search_indices].flatten(0, 1)
        //     if encoder_padding_mask is not None:
        //         // (N x B, S_enc, M) -> (N, B, S_enc, M)
        //         padding_mask = encoder_padding_mask.unflatten(0, (num_searches, -1))
        //         // (N, B, S_enc, M) -> ((N - F) x B, S_enc, M)
        //         encoder_padding_mask = padding_mask[search_indices].flatten(0, 1)
        //     num_searches = new_num_searches
        // }
        // eos_mask[:, :beam_size][ignored_beam_mask] = True
        // // Set `beam_weights` so that values greater than or equal to 2 x
        // // `beam_size` indicate finished beams (i.e. end with EOS) and values
        // // less than 2 x `beam_size` indicate active beams.
        // // (N, 2 x B)
        // beam_weights = cand_offsets + (eos_mask * (2 * beam_size))
        // // Get the top `beam_size` active beams, which are the beams with the
        // // smallest weights in `active_beam_weights`.
        // // (N, B)
        // active_beam_weights, active_beams = torch.topk(
        //     beam_weights, k=beam_size, dim=1, largest=False
        // )
        // // Update to ignore finalized beams in the next step.
        // // (N, B)
        // ignored_beam_mask = active_beam_weights >= 2 * beam_size
        // // We should always have at least one active beam in each search.
        // assert (~ignored_beam_mask).any(dim=1).all()
        // // Denotes which beams are continued for each new hypothesis (a beam
        // // can be selected more than once).
        // // (N, B)
        // beam_indices = torch.gather(
        //     global_cand_beam_indices, dim=1, index=active_beams
        // )
        // // (N, B) -> (N x B)
        // beam_indices = beam_indices.view(-1)
        // // Reorder beams in the `seq` and `score` buffers. The same beam can
        // // be selected more than once.
        // if (step_nr > start_step) {
        //     seqs  [:, : step_nr + 1] = torch.index_select(
        //         seqs  [:, : step_nr + 1], dim=0, index=beam_indices
        //     )
        //     scores[:, : step_nr + 1] = torch.index_select(
        //         scores[:, : step_nr + 1], dim=0, index=beam_indices
        //     )
        // }
        // // (N x B, S) -> (N, B, S)
        // seqs_view   = seqs  .view(num_searches, beam_size, -1)
        // scores_view = scores.view(num_searches, beam_size, -1)
        // seqs_view  [:, :, step_nr + 1] = torch.gather(cand_indices, dim=1, index=active_beams)
        // scores_view[:, :, step_nr + 1] = torch.gather(cand_scores,  dim=1, index=active_beams)
    }

    // Ensure that hypotheses are sorted by their scores before returning.
    // for batch in finished_searches:
    //     batch.sort(key=lambda b: b.score, reverse=True)
    // return SequenceGeneratorOutput(
    //     results=finished_searches, device=device, pad_idx=self.pad_idx
    // )
    return 0.0f;
}
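
// Illustrative usage sketch (hypothetical values; assumes the model weights
// were loaded elsewhere and `encoder_output` came from
// StandardTransformerEncoder_forward):
//
//   SequenceGeneratorJob job;
//   job.opts.beam_size = 2;
//   job.opts.min_seq_len = 1;
//   job.opts.soft_max_seq_len_a = 1;
//   job.opts.soft_max_seq_len_b = 200;
//   job.opts.hard_max_seq_len = 1024;
//   job.opts.len_penalty = 1.0;
//   job.opts.normalize_scores = true;
//   job.eos_idx = 3; // model-specific special token id
//   job.prefix_seq = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
//   ggml_set_i32_1d(job.prefix_seq, 0, /*BOS*/ 2);
//   generate_sequence(model, job, encoder_output, /*padding*/ nullptr, /*output_seq*/ nullptr);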