# deeppunkt.py
from deepmultilingualpunctuation import PunctuationModel
import re
import metrics


def remove_filler_words(transcript):
    """Remove common filler words while preserving the transcript's line breaks."""
    # Protect the line breaks by joining the lines with a sentinel token
    transcript_hash = " # ".join(transcript.strip().splitlines())
    # Filler words to drop (matched case-insensitively)
    filler_words = ["um", "uh", "hmm", "ha", "er", "ah", "yeah"]
    words = transcript_hash.split()
    clean_words = [word for word in words if word.lower() not in filler_words]
    input_text_clean = ' '.join(clean_words)
    # Restore the line breaks
    input_text = input_text_clean.replace(' # ', '\n')
    return input_text
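
# Usage sketch (illustrative input, not from the repo):
#   remove_filler_words("so um yeah this is\nuh the plan")
#   -> 'so this is\nthe plan'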

# Alternative approach (not used): match any filler word bounded by whitespace
# or a word boundary with a single regular expression,
# e.g. pattern = r"(?<=\s|\b)(" + "|".join(filler_words) + r")(?=\s|\b)"
# and strip the fillers in one pass with re.sub(pattern, "", input_text).
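
# A runnable sketch of that regex route, assuming the same filler list; this
# helper is hypothetical and is not called by predict(). It can leave a stray
# space next to punctuation (e.g. "um,").
def remove_filler_words_regex(transcript):
    filler_words = ["um", "uh", "hmm", "ha", "er", "ah", "yeah"]
    pattern = r"\b(?:" + "|".join(filler_words) + r")\b"
    no_fillers = re.sub(pattern, "", transcript, flags=re.IGNORECASE)
    # Collapse the double spaces the removal leaves behind (newlines untouched)
    return re.sub(r"[ \t]{2,}", " ", no_fillers)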


def predict(brakes, transcript):
    """Restore punctuation and, optionally, line breaks in a raw transcript."""
    input_text = remove_filler_words(transcript)
    # Do the punctuation restoration
    model = PunctuationModel()
    output_text = model.restore_punctuation(input_text)
    # If neither line-break method is selected, the text stays on a single line
    pcnt_file_cr = output_text
    if 'textlines' in brakes:
        # Preserve the original line breaks: mark each line end with '#'
        srt_file_hash = '# '.join(input_text.strip().splitlines())
        srt_file_array = srt_file_hash.split()
        pcnt_file_array = output_text.split()
        # Goal: restore the break points, i.e. the same number of lines as the
        # srt file. This is necessary because each line in the srt file
        # corresponds to a frame from the video.
        if len(srt_file_array) != len(pcnt_file_array):
            raise ValueError(
                "The transcript and the punctuated text should have the same "
                f"number of words: {len(srt_file_array)} != {len(pcnt_file_array)}"
            )
        # Carry the '#' end-of-line markers over to the punctuated words
        pcnt_file_array_hash = []
        for idx, item in enumerate(srt_file_array):
            if item.endswith('#'):
                pcnt_file_array_hash.append(pcnt_file_array[idx] + '#')
            else:
                pcnt_file_array_hash.append(pcnt_file_array[idx])
        # Reassemble the words into a string, turning the markers back into newlines
        pcnt_file_cr = ' '.join(pcnt_file_array_hash).replace('# ', '\n')
    elif 'sentences' in brakes:
        # One sentence per line (splits on '. ' only; '?' and '!' stay inline)
        split_text = output_text.split('. ')
        pcnt_file_cr = '.\n'.join(split_text)
    # Capitalization passes: standalone "i" -> "I", then the first letter after
    # sentence-ending punctuation, then the first letter of the text
    regex1 = r"\bi\b"
    regex2 = r"(?<=[.?!;])\s*\w"
    regex3 = r"^\w"
    pcnt_file_cr_cap = re.sub(regex1, "I", pcnt_file_cr)
    pcnt_file_cr_cap = re.sub(regex2, lambda m: m.group().upper(), pcnt_file_cr_cap)
    pcnt_file_cr_cap = re.sub(regex3, lambda m: m.group().upper(), pcnt_file_cr_cap)
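    # Illustrative effect (hypothetical input, not from the repo):
    #   "hello. how are you? i think so" -> "Hello. How are you? I think so"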
    # Compute simple text statistics with the repo's metrics helpers
    metrics.load_nltk()
    n_tokens = metrics.num_tokens(pcnt_file_cr_cap)
    n_sents = metrics.num_sentences(pcnt_file_cr_cap)
    n_words = metrics.num_words(pcnt_file_cr_cap)
    n_chars = metrics.num_chars(pcnt_file_cr_cap)
    return pcnt_file_cr_cap, n_words, n_sents, n_chars, n_tokens
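

if __name__ == "__main__":
    # Minimal smoke test -- a sketch, assuming the deepmultilingualpunctuation
    # model can be downloaded from the Hugging Face Hub and that the repo-local
    # `metrics` module is importable. The transcript below is made up.
    demo = "so um yeah this is the first line\nuh and this is the second line"
    text, n_words, n_sents, n_chars, n_tokens = predict("textlines", demo)
    print(text)
    print(f"words={n_words} sentences={n_sents} chars={n_chars} tokens={n_tokens}")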