# model.py

import torch
from typing import List, Union

from SwissArmyTransformer.generation.autoregressive_sampling import update_mems, get_masks_and_position_ids_default


def batch_filling_sequence(
        model,
        seqs,
        context_lengths,
        strategy,
        max_memory_length=100000,
        get_masks_and_position_ids=get_masks_and_position_ids_default,
        mems=None,
        **kw_args
):
    '''
        seqs: [batch_size, seq_len] token ids, e.g. each row [2, 3, 5, ..., -1 (to be generated), -1, ...]
        mems: [num_layers, batch_size, len_mems(index), mem_hidden_size]
            cache, should be the first mems.shape[2] parts of context_tokens.
            mems are the first-class citizens here, but we don't assume what is memorized.
            input mems are used for multi-phase generation.
    '''
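    # Rough shape sketch (illustrative, not from the original source):
    #   seqs            : [batch_size, seq_len] token ids; -1 marks positions still to be generated
    #   context_lengths : [batch_size] true (unpadded) context length of each sample
    #   mems (optional) : [num_layers, batch_size, cached_len, hidden] key/value cache from a previous phase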
    assert len(seqs.shape) == 2

    # building the initial tokens, attention_mask, and position_ids
    batch_size, context_length = seqs.shape
    seqs, attention_mask, position_ids = get_masks_and_position_ids(seqs)
    tokens = seqs[..., :context_length]
    if attention_mask.dtype != torch.bool:
        attention_mask = attention_mask.type_as(next(model.parameters()))  # if fp16
    # initialize generation
    counter = context_length - 1  # Last fixed index is ``counter''
    index = 0 if mems is None else mems.shape[2]  # Next forward starting index, also the length of cache.
    num_beams = 1
    # step-by-step generation
    while counter < seqs.shape[1] - 1:
        # Now, we want to generate seq[counter + 1],
        # token[:, index: counter+1] needs forwarding.
        # forward
        if num_beams > 1:
            tokens = tokens.reshape(batch_size * num_beams, -1)
            mems = mems.reshape(mems.shape[0], batch_size * num_beams, mems.shape[-2], mems.shape[-1])
        logits, *output_per_layers = model(
            tokens[:, index:],
            position_ids[..., index: counter + 1],
            attention_mask[..., index: counter + 1, :counter + 1],  # TODO memlen
            mems=mems,
            **kw_args
        )
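        # Each layer's output carries its new key/value cache ('mem_kv'); update_mems appends it so the
        # next iteration only has to forward the freshly generated tokens (tokens[:, index:]).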
        mem_kv = [o['mem_kv'] for o in output_per_layers]
        mems = update_mems(mem_kv, mems, max_memory_length=max_memory_length)
        if counter == context_length - 1:
            # first step: pick the logits at each sample's true last context position
            logits = logits[torch.arange(batch_size), context_lengths - 1]
        else:
            logits = logits[:, -1]
        counter += 1
        index = counter
        # sampling
        if num_beams > 1:
            logits = logits.reshape(batch_size, num_beams, -1)
            tokens = tokens.reshape(batch_size, num_beams, -1)
            mems = mems.reshape(mems.shape[0], batch_size, num_beams, mems.shape[-2], mems.shape[-1])
        tokens, mems = strategy.forward(logits, tokens, mems)
        if len(tokens.shape) == 3 and num_beams == 1:
            # the strategy switched to beam search: replicate position_ids and attention_mask per beam
            num_beams = tokens.shape[1]
            position_ids = position_ids.unsqueeze(1).expand(batch_size, num_beams, -1).reshape(
                batch_size * num_beams, -1)
            attention_mask_shape = attention_mask.shape[-3:]
            attention_mask = attention_mask.unsqueeze(1).expand(batch_size, num_beams, -1, -1, -1).reshape(
                batch_size * num_beams, *attention_mask_shape)
        if strategy.is_done:
            break
    return strategy.finalize(tokens, mems)
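

# Call pattern (this is how ModelForEvaluation.generate_text below invokes the loop; `strategy` is any
# object implementing forward / is_done / finalize / end_tokens the way this loop expects):
#
#     output = batch_filling_sequence(
#         model, seqs, context_lengths,
#         strategy=strategy,
#         get_masks_and_position_ids=get_masks_and_position_ids,
#     )[0]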


class ModelForEvaluation(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    @staticmethod
    def process_data(batch):
        # Move the batch tensors to the current CUDA device; the attention mask is cast to bool
        # and given a singleton dimension at position 1.
        return (
            batch["tokens"].to(device=torch.cuda.current_device()).long(),
            batch["position_ids"].to(device=torch.cuda.current_device()).long(),
            batch["attention_mask"].to(device=torch.cuda.current_device()).bool().unsqueeze(1),
        )
    def cond_log_prob(self, batch) -> List[List[float]]:
        """
        @return: Conditional log probability of each option
        """
        tokens, position_ids, attention_mask = self.process_data(batch)
        choices_batch, choice_target_ids_batch = batch["choices"], batch["choice_target_ids"]
        is_single_token = batch["is_single_token"]

        self.model.eval()
        with torch.no_grad():
            logits, *output_per_layers = self.model(tokens, position_ids, attention_mask, log_attention_weights=None)
            logits_batch = torch.nn.functional.log_softmax(logits, dim=-1)

        # output: [b, sq, vocab]
        log_probs = []

        if is_single_token:  # Single token
            for logits, choices, choice_target_ids in zip(logits_batch, choices_batch, choice_target_ids_batch):
                log_probs.append(logits[choice_target_ids[0], choices].tolist())
        else:  # Multi token
            for output, choices, choice_target_ids in zip(logits_batch, choices_batch, choice_target_ids_batch):
                log_probs_single = []
                for choice, choice_target_id in zip(choices, choice_target_ids):
                    tmp = output[choice_target_id, choice]
                    log_probs_single.append(tmp.sum().tolist())
                log_probs.append(log_probs_single)
        return log_probs
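
    # Toy illustration (hypothetical values, not from this file): for a sample with
    # choices = [[523], [87, 91]], the single-token branch reads one position's log-probabilities,
    # while the multi-token branch sums log-probabilities over each choice's tokens; either way the
    # result is one conditional log probability per option.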

    def generate_text(self, sample, strategy, return_all_beams=False, max_gen_length=128) -> Union[
            List[int], List[List[int]]]:
        """
        @return: A list of token id sequences generated by the model, sorted by score in descending order
        """
        seqs = sample["tokens"].to(device=torch.cuda.current_device()).long()
        context_lengths = sample["context_length"].long()

        def get_masks_and_position_ids(seq):
            batch_size = seq.shape[0]
            # append max_gen_length slots (filled with -1) for the tokens to be generated
            tokens = torch.nn.functional.pad(seq, (0, max_gen_length), mode='constant', value=-1)
            position_ids = torch.cat((sample['position_ids'], sample['target_position_ids']), dim=-1)
            position_ids = position_ids.to(device=torch.cuda.current_device()).long()
            attention_mask = sample["attention_mask"].to(device=torch.cuda.current_device())
            # rows for the generated positions: reuse the last context token's mask row for the context part,
            # plus the triangular pattern among the generated positions themselves
            context_mask = attention_mask[torch.arange(batch_size), context_lengths - 1].unsqueeze(1).repeat(
                1, max_gen_length, 1)
            causal_mask = torch.tril(context_mask.new_ones((batch_size, max_gen_length, max_gen_length))) < 0.5
            generation_mask = torch.cat((context_mask, causal_mask), dim=-1)
            # widen the original context rows by max_gen_length columns, then stack the generation rows below
            attention_mask = torch.nn.functional.pad(attention_mask, (0, max_gen_length), mode='constant', value=1)
            attention_mask = torch.cat((attention_mask, generation_mask), dim=1)
            attention_mask = attention_mask.bool().unsqueeze(1)
            return tokens, attention_mask, position_ids
        self.model.eval()
        with torch.no_grad():
            output = batch_filling_sequence(
                self.model,
                seqs,
                context_lengths,
                get_masks_and_position_ids=get_masks_and_position_ids,
                strategy=strategy,
            )[0]

        if isinstance(output, torch.Tensor):  # different strategies
            output = list(output)

        output_targets = []
        context_length = seqs.shape[1]
        for lines in output:
            output_target = []
            if not isinstance(lines, list):
                lines = [lines]  # a single beam
            for line in lines:
                line = line.tolist()
                # cut off the unfilled (-1) tail and a trailing end token, then drop the context prefix
                unfinished = line.index(-1) if -1 in line else len(line)
                if line[unfinished - 1] in strategy.end_tokens:
                    unfinished -= 1
                line = line[context_length:unfinished]
                output_target.append(line)
            if not return_all_beams:
                output_targets.append(output_target[0])
            else:
                output_targets.append(output_target)
        return output_targets
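

# A minimal usage sketch (hypothetical wiring; the wrapped model, batch and strategy objects are
# assumptions, only the ModelForEvaluation API above is taken from this file):
#
#     model = ModelForEvaluation(glm_model)              # wrap an already-initialized model
#     scores = model.cond_log_prob(batch)                # multiple-choice: one log-prob list per sample
#     outputs = model.generate_text(sample, strategy)    # generation: token ids following the context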