from transformers import AutoProcessor, SeamlessM4Tv2Model

processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")


def translator(subtitle_line: str, target_language: str) -> str:
    # Ensure the model is on the GPU (the processor stays on CPU; it has no .to())
    model.to('cuda')

    # Tokenize the English subtitle line and move the input tensors to the GPU
    text_inputs = processor(text=subtitle_line, src_lang="eng", return_tensors="pt")
    text_inputs = {key: value.to('cuda') for key, value in text_inputs.items()}

    # Generate output tokens on the GPU (text-only output, beam search)
    output_tokens = model.generate(**text_inputs, tgt_lang=target_language, num_beams=5, generate_speech=False)

    # Decode the token IDs back into the translated string
    return processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)
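For context, a minimal usage sketch: SeamlessM4T expects three-letter target-language codes (e.g. "fra" for French), and the example sentence here is purely illustrative.

# Hypothetical call: translate one subtitle line into French.
print(translator("This subtitle needs translating.", "fra"))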