@@ -23,9 +23,7 @@ def add_quantization_args(parser):
     group = parser.add_argument_group("Quantization")
 
     group.add_argument("--quantization-bit-width", type=int, default=None)
-    group.add_argument(
-        "--load-from-quantized-checkpoint", action="store_true", help="Loading from a quantized checkpoint"
-    )
+    group.add_argument("--from-quantized-checkpoint", action="store_true", help="Loading from a quantized checkpoint")
 
 
 def initialize(extra_args_provider):
@@ -48,7 +46,7 @@ def initialize_model_and_tokenizer(args):
     # Initialize model
     model = GLM130B(args).half()
 
-    if args.load_from_quantized_checkpoint:
+    if args.from_quantized_checkpoint:
         assert not args.bminf and args.quantization_bit_width is not None
         # Quantize model before moving to GPU
         model = quantize(model, args.quantization_bit_width)
@@ -67,7 +65,7 @@ def initialize_model_and_tokenizer(args):
         with torch.cuda.device(args.device):
             model = bminf.wrapper(model, quantization=False, memory_limit=args.bminf_memory_limit << 30)
     else:
-        if args.quantization_bit_width is not None and not args.load_from_quantized_checkpoint:
+        if args.quantization_bit_width is not None and not args.from_quantized_checkpoint:
             # Quantize model before moving to GPU
             model = quantize(model, args.quantization_bit_width)
         model = model.to(args.device)
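
For context, a minimal sketch (not part of the patch) of how the renamed flag is expected to combine with the other quantization options; the --bminf argument is assumed from the surrounding parser setup, and the check mirrors the assert in initialize_model_and_tokenizer.

# Hypothetical sketch: loading an already-quantized checkpoint requires an
# explicit bit width and is incompatible with BMInf offloading.
import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group("Quantization")
group.add_argument("--quantization-bit-width", type=int, default=None)
group.add_argument("--from-quantized-checkpoint", action="store_true", help="Loading from a quantized checkpoint")
parser.add_argument("--bminf", action="store_true")  # assumed to be defined elsewhere in the real parser

args = parser.parse_args(["--from-quantized-checkpoint", "--quantization-bit-width", "4"])
# Same consistency check as in the patched initialization code:
assert not args.bminf and args.quantization_bit_width is not None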