import gc  # used to free buffers after each transcription

import numpy as np
import pyarrow as pa
import sounddevice as sd
import torch
import whisper
from dora import DoraStatus
from pynput import keyboard
from pynput.keyboard import Key

# Load the Whisper "base" checkpoint once at import time so it is reused across events.
model = whisper.load_model("base")

SAMPLE_RATE = 16000  # Whisper expects 16 kHz mono audio
MAX_DURATION = 10  # seconds recorded on the first activation
MIN_DURATION = 6  # seconds recorded on subsequent activations


class Operator:
    """
    Transforming Speech to Text using OpenAI Whisper model
    """

    def __init__(self) -> None:
        # Tracks whether the first (longer) recording has already been made.
        self.policy_init = False

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        global model
        if dora_event["type"] == "INPUT":
            ## Check for keyboard event
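            # Wait up to one second for a keyboard event; only an Up-arrow press triggers recording.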
            with keyboard.Events() as events:
                event = events.get(1.0)
                if event is not None and event.key == Key.up:
                    # send_output("led", pa.array([0, 255, 0]))

                    # Record a longer window on the first activation, a shorter one afterwards.
                    if not self.policy_init:
                        self.policy_init = True
                        duration = MAX_DURATION
                    else:
                        duration = MIN_DURATION

                    ## Microphone
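                    # Record `duration` seconds of mono int16 audio; blocking=True waits
                    # until the recording has finished before returning.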
                    audio_data = sd.rec(
                        int(SAMPLE_RATE * duration),
                        samplerate=SAMPLE_RATE,
                        channels=1,
                        dtype=np.int16,
                        blocking=True,
                    )

                    # Convert int16 samples to float32 in [-1.0, 1.0], as Whisper expects.
                    audio = audio_data.ravel().astype(np.float32) / 32768.0

                    ## Speech to text
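                    # pad_or_trim fits the clip to Whisper's 30-second context window.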
                    audio = whisper.pad_or_trim(audio)
                    result = model.transcribe(audio, language="en")
                    send_output(
                        "text", pa.array([result["text"]]), dora_event["metadata"]
                    )
                    # send_output("led", pa.array([0, 0, 255]))

                    # Free temporary buffers and any cached GPU memory between transcriptions.
                    gc.collect()
                    torch.cuda.empty_cache()

        return DoraStatus.CONTINUE