# NOTE(review): this file's newlines/indentation were stripped during extraction —
# everything below is one collapsed physical line. From the visible tokens it is a
# Gradio ASR demo: it loads "jonatasgrosman/wav2vec2-large-xlsr-53-russian" via
# transformers AutoModelForCTC into CACHED_MODEL, and `run` builds an
# "automatic-speech-recognition" pipeline (with a Wav2Vec2ProcessorWithLM decoder
# when decoding_type == "LM") and transcribes `input_file` with chunk_length_s=5,
# stride_length_s=1, appending the result to a per-call `history` list.
# WARNING: the chunk is truncated mid-statement (it ends at `html_output = "` with
# an unterminated string), and the inline `# COPYPASTED FROM: ...` comment swallows
# the remainder of the collapsed line — the original line structure cannot be
# recovered safely from this view, so the bytes are preserved verbatim below.
# TODO: restore the original file (with newlines) before attempting any code change.
import gradio as gr import sys import logging from huggingsound import SpeechRecognitionModel from transformers import pipeline, AutoModelForCTC, Wav2Vec2Processor, Wav2Vec2ProcessorWithLM # COPYPASTED FROM: https://huggingface.co/spaces/jonatasgrosman/asr/blob/main/app.py logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) model_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-russian" CACHED_MODEL = {"rus": AutoModelForCTC.from_pretrained(model_ID)} def run(input_file, history, model_size="300M"): language = "Russian" decoding_type = "LM" logger.info(f"Running ASR {language}-{model_size}-{decoding_type} for {input_file}") # history = history or [] # the history seems to be not by session anymore, so I'll deactivate this for now history = [] model_instance = CACHED_MODEL.get("rus") if decoding_type == "LM": processor = Wav2Vec2ProcessorWithLM.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-russian") asr = pipeline("automatic-speech-recognition", model=model_instance, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, decoder=processor.decoder) else: processor = Wav2Vec2Processor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-russian") asr = pipeline("automatic-speech-recognition", model=model_instance, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, decoder=None) transcription = asr(input_file.name, chunk_length_s=5, stride_length_s=1)["text"] logger.info(f"Transcription for {language}-{model_size}-{decoding_type} for {input_file}: {transcription}") history.append({ "model_id": model_ID, "language": language, "model_size": model_size, "decoding_type": decoding_type, "transcription": transcription, "error_message": None }) html_output = "