predict.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging

import torch
import torchaudio

from seamless_communication.models.inference import Translator

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="M4T inference on supported tasks using Translator."
    )
    parser.add_argument("input", type=str, help="Audio WAV file path or text input.")
    parser.add_argument(
        "task", type=str, help="Task type (S2ST, S2TT, T2ST, T2TT or ASR)."
    )
    parser.add_argument(
        "tgt_lang", type=str, help="Target language to translate/transcribe into."
    )
    parser.add_argument(
        "--src_lang",
        type=str,
        help="Source language, only required if input is text.",
        default=None,
    )
    parser.add_argument(
        "--output_path",
        type=str,
        help="Path to save the generated audio.",
        default=None,
    )
    parser.add_argument(
        "--model_name",
        type=str,
        help="Base model name (`seamlessM4T_medium`, `seamlessM4T_large`)",
        default="seamlessM4T_large",
    )
    parser.add_argument(
        "--vocoder_name", type=str, help="Vocoder name", default="vocoder_36langs"
    )
    parser.add_argument(
        "--ngram-filtering",
        # argparse's type=bool treats any non-empty string (including "False") as
        # True, so this option is exposed as a flag instead.
        action="store_true",
        default=False,
        help="Enable ngram_repeat_block (currently hardcoded to 4, during decoding) and ngram filtering over units (postprocessing)",
    )
    args = parser.parse_args()
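
    # Speech-output tasks must know where to write the generated waveform.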
    if args.task.upper() in {"S2ST", "T2ST"} and args.output_path is None:
        raise ValueError("output_path must be provided to save the generated audio")

    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        logger.info("Running inference on the GPU.")
    else:
        device = torch.device("cpu")
        logger.info("Running inference on the CPU.")
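
    # Load the model and vocoder checkpoints onto the selected device.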
    translator = Translator(args.model_name, args.vocoder_name, device)
    translated_text, wav, sr = translator.predict(
        args.input,
        args.task,
        args.tgt_lang,
        src_lang=args.src_lang,
        ngram_filtering=args.ngram_filtering,
    )
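
    # wav and sr are only returned for tasks that produce speech; the text is always returned.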
    if wav is not None and sr is not None:
        logger.info(f"Saving translated audio in {args.tgt_lang}")
        torchaudio.save(
            args.output_path,
            wav[0].cpu(),
            sample_rate=sr,
        )
    logger.info(f"Translated text in {args.tgt_lang}: {translated_text}")


if __name__ == "__main__":
    main()
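
# Example CLI usage (illustrative; file names and language codes are assumptions):
#   S2ST: python predict.py input.wav s2st fra --output_path translated.wav
#   T2TT: python predict.py "Hello, world." t2tt fra --src_lang eng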