convert-h5-to-ggml.py

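# Convert a Hugging Face causal LM checkpoint (GPT-NeoX style config, as loaded
# by transformers) to the ggml binary format.
#
# Usage:
#   python convert-h5-to-ggml.py dir-model [use-f32]
#
#   use-f32 (ftype): 0 -> float32, 1 -> float16 (default: float16)
#
# The output is written into the model directory as ggml-model-f32.bin /
# ggml-model-f16.bin, or ggml-model.bin when no ftype argument is given.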
import sys
import struct
import json

import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer

if len(sys.argv) < 2:
    print("Usage: convert-h5-to-ggml.py dir-model [use-f32]\n")
    print("  ftype == 0 -> float32")
    print("  ftype == 1 -> float16")
    sys.exit(1)
# output in the same directory as the model
dir_model = sys.argv[1]
fname_out = sys.argv[1] + "/ggml-model.bin"

with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f:
    encoder = json.load(f)

with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
    hparams = json.load(f)
# possible data types
#   ftype == 0 -> float32
#   ftype == 1 -> float16
#
# map from ftype to string
ftype_str = ["f32", "f16"]

ftype = 1
if len(sys.argv) > 2:
    ftype = int(sys.argv[2])
    if ftype < 0 or ftype > 1:
        print("Invalid ftype: " + str(ftype))
        sys.exit(1)
    fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin"

tokenizer = AutoTokenizer.from_pretrained(dir_model)
model = AutoModelForCausalLM.from_pretrained(dir_model, low_cpu_mem_usage=True)
#print(model)
#print(tokenizer.encode('I believe the meaning of life is'))

list_vars = model.state_dict()
for name in list_vars.keys():
    print(name, list_vars[name].shape, list_vars[name].dtype)

fout = open(fname_out, "wb")

print(hparams)
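# model hyperparameter header, written as consecutive int32 values:
#   magic ("ggml"), vocab size, context length (max_position_embeddings),
#   embedding size, number of attention heads, number of layers,
#   rotary dimensions per head (rotary_pct * head size),
#   the parallel-residual flag, and the ftype of the tensor data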
fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex
fout.write(struct.pack("i", hparams["vocab_size"]))
fout.write(struct.pack("i", hparams["max_position_embeddings"]))
fout.write(struct.pack("i", hparams["hidden_size"]))
fout.write(struct.pack("i", hparams["num_attention_heads"]))
fout.write(struct.pack("i", hparams["num_hidden_layers"]))
fout.write(struct.pack("i", int(hparams["rotary_pct"]*(hparams["hidden_size"]//hparams["num_attention_heads"]))))
fout.write(struct.pack("i", hparams["use_parallel_residual"]))
fout.write(struct.pack("i", ftype))
# TODO: temporary hack to not deal with implementing the tokenizer
dot_token = tokenizer.encode('.')[0]
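# the vocabulary is written as length-prefixed UTF-8 strings; decoding
# [dot_token, i] and stripping the leading '.' recovers the surface text of
# token i without re-implementing the tokenizer's decoding logic here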
for i in range(hparams["vocab_size"]):
    text = tokenizer.decode([dot_token, i]).encode('utf-8')
    # remove the first byte (it's always '.')
    text = text[1:]
    fout.write(struct.pack("i", len(text)))
    fout.write(text)
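# each tensor is serialized as: number of dimensions, length of the name,
# ftype of the data, the dimensions (last dimension first), the name bytes,
# and finally the raw tensor data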
for name in list_vars.keys():
    data = list_vars[name].squeeze().numpy()
    print("Processing variable: " + name + " with shape: ", data.shape)

    # we don't need these
    if name.endswith(".attention.masked_bias") or \
       name.endswith(".attention.bias") or \
       name.endswith(".attention.rotary_emb.inv_freq"):
        print("  Skipping variable: " + name)
        continue

    n_dims = len(data.shape)
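    # with ftype == 1, only 2-D ".weight" matrices are stored as float16;
    # every other tensor (1-D weights and biases) stays float32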
    # ftype == 0 -> float32, ftype == 1 -> float16
    ftype_cur = 0
    if ftype != 0:
        if name[-7:] == ".weight" and n_dims == 2:
            print("  Converting to float16")
            data = data.astype(np.float16)
            ftype_cur = 1
        else:
            print("  Converting to float32")
            data = data.astype(np.float32)
            ftype_cur = 0
    else:
        if data.dtype != np.float32:
            print("  Converting to float32")
            data = data.astype(np.float32)
            ftype_cur = 0
    # header
    name_bytes = name.encode('utf-8')
    fout.write(struct.pack("iii", n_dims, len(name_bytes), ftype_cur))
    for i in range(n_dims):
        fout.write(struct.pack("i", data.shape[n_dims - 1 - i]))
    fout.write(name_bytes)

    # data
    data.tofile(fout)

fout.close()

print("Done. Output file: " + fname_out)
print("")
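# Minimal sanity-check sketch (not executed by this script): the header of the
# generated file can be read back with the same field order written above.
#
#   with open(fname_out, "rb") as f:
#       (magic, n_vocab, n_ctx, n_embd, n_head,
#        n_layer, n_rot, par_res, ftype_out) = struct.unpack("9i", f.read(9 * 4))
#       assert magic == 0x67676d6c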