# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging

import torch
import torchaudio

from seamless_communication.models.inference import Translator
# Configure root logging once at import time so every logger in the process
# emits timestamped, level-tagged records.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s -- %(name)s: %(message)s",
)
# Module-level logger following the standard `__name__` convention.
logger = logging.getLogger(__name__)
  15. def main():
  16. parser = argparse.ArgumentParser(
  17. description="M4T inference on supported tasks using Translator."
  18. )
  19. parser.add_argument("input", type=str, help="Audio WAV file path or text input.")
  20. parser.add_argument("task", type=str, help="Task type")
  21. parser.add_argument(
  22. "tgt_lang", type=str, help="Target language to translate/transcribe into."
  23. )
  24. parser.add_argument(
  25. "--src_lang",
  26. type=str,
  27. help="Source language, only required if input is text.",
  28. default=None,
  29. )
  30. parser.add_argument(
  31. "--output_path",
  32. type=str,
  33. help="Path to save the generated audio.",
  34. default=None,
  35. )
  36. parser.add_argument(
  37. "--model_name",
  38. type=str,
  39. help="Base model name (`seamlessM4T_medium`, `seamlessM4T_large`)",
  40. default="seamlessM4T_large",
  41. )
  42. parser.add_argument(
  43. "--vocoder_name", type=str, help="Vocoder name", default="vocoder_36langs"
  44. )
  45. parser.add_argument(
  46. "--ngram-filtering",
  47. type=bool,
  48. help="Enable ngram_repeat_block (currently hardcoded to 4, during decoding) and ngram filtering over units (postprocessing)",
  49. default=False,
  50. )
  51. args = parser.parse_args()
  52. if args.task.upper() in {"S2ST", "T2ST"} and args.output_path is None:
  53. raise ValueError("output_path must be provided to save the generated audio")
  54. if torch.cuda.is_available():
  55. device = torch.device("cuda:0")
  56. logger.info("Running inference on the GPU.")
  57. else:
  58. device = torch.device("cpu")
  59. logger.info("Running inference on the CPU.")
  60. translator = Translator(args.model_name, args.vocoder_name, device)
  61. translated_text, wav, sr = translator.predict(
  62. args.input,
  63. args.task,
  64. args.tgt_lang,
  65. src_lang=args.src_lang,
  66. ngram_filtering=args.ngram_filtering,
  67. )
  68. if wav is not None and sr is not None:
  69. logger.info(f"Saving translated audio in {args.tgt_lang}")
  70. torchaudio.save(
  71. args.output_path,
  72. wav[0].cpu(),
  73. sample_rate=sr,
  74. )
  75. logger.info(f"Translated text in {args.tgt_lang}: {translated_text}")
# Standard script guard: run inference only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()