haixuantao committed
Commit 3c8b0eb · Parent: 1646f18

Fixing multiple small bugs
graphs/dataflow_robot_vlm_minimize.yml CHANGED

@@ -11,6 +11,7 @@ nodes:
         image:
           source: webcam/image
           queue_size: 1
+        text: whisper/text
       outputs:
         - speak
         - control
@@ -31,4 +32,11 @@ nodes:
     operator:
       python: ../operators/plot.py
       inputs:
-        image: webcam/image
+        image: webcam/image
+  - id: whisper
+    operator:
+      python: ../operators/whisper_op.py
+      inputs:
+        audio: dora/timer/millis/1000
+      outputs:
+        - text
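The net effect is that the main node now receives a `text` input from a new `whisper` node, which is triggered every second by `dora/timer/millis/1000` and publishes a `text` output. Below is a rough sketch, not code from this commit, of how a node wired this way can tell its two inputs apart; it assumes dora delivers the declared input name under the event's "id" key, alongside the "type"/"value"/"metadata" fields the operators in this repo already use:

```python
from dora import DoraStatus


class Operator:
    def __init__(self):
        self.last_text = ""  # most recent transcription from whisper/text

    def on_event(self, dora_event, send_output):
        if dora_event["type"] == "INPUT":
            if dora_event["id"] == "text":
                # New instruction arriving on the text input declared in the YAML
                self.last_text = dora_event["value"][0].as_py()
            elif dora_event["id"] == "image":
                # Latest webcam frame; queue_size: 1 keeps only the newest one
                frame = dora_event["value"].to_numpy()
                # ... run the VLM on (frame, self.last_text), then
                # send_output("speak", ...) / send_output("control", ...)
        return DoraStatus.CONTINUE
```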
operators/parler_op.py CHANGED

@@ -5,14 +5,14 @@ import pygame
 from dora import DoraStatus
 
 model = ParlerTTSForConditionalGeneration.from_pretrained(
-    "parler-tts/parler_tts_mini_v0.1"
+    "ylacombe/parler-tts-mini-jenny-30H"
 ).to("cuda:0")
-tokenizer = AutoTokenizer.from_pretrained("parler-tts/parler_tts_mini_v0.1")
+tokenizer = AutoTokenizer.from_pretrained("ylacombe/parler-tts-mini-jenny-30H")
 
 pygame.mixer.init()
 
 input_ids = tokenizer(
-    "A female speaker with a slightly low-pitched voice delivers her words quite expressively, in a very confined sounding environment with clear audio quality. She speaks very fast.",
+    "Jenny delivers her words quite expressively, in a very confined sounding environment with clear audio quality.",
     return_tensors="pt",
 ).input_ids.to("cuda:0")
 
@@ -25,8 +25,8 @@ class Operator:
     ):
         if dora_event["type"] == "INPUT":
             generation = model.generate(
-                max_new_tokens=300,
                 input_ids=input_ids,
+                min_new_tokens=100,
                 prompt_input_ids=tokenizer(
                     dora_event["value"][0].as_py(), return_tensors="pt"
                 ).input_ids.to("cuda:0"),
@@ -38,9 +38,15 @@ class Operator:
                 model.config.sampling_rate,
             )
 
-            while pygame.mixer.get_busy():
-                pass
             pygame.mixer.music.load(f"parler_tts_out.wav")
             pygame.mixer.music.play()
+            while pygame.mixer.get_busy():
+                pass
 
         return DoraStatus.CONTINUE
+
+
+# op = Operator()
+# import pyarrow as pa
+
+# op.on_event({"type": "INPUT", "value": pa.array(["Hello, how are you?"])}, None)
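Two behavioural changes here: generation now enforces `min_new_tokens=100` instead of capping at `max_new_tokens=300`, and the busy-wait is moved after `play()`, so the operator blocks until the current utterance finishes rather than spinning before loading the next file. A minimal sketch of that playback pattern follows, assuming the waveform is written with soundfile as in the surrounding code; `play_waveform`, the short sleep, and the `pygame.mixer.music.get_busy()` (music-stream) variant of the busy check are this sketch's own choices, not the commit's:

```python
import time

import pygame
import soundfile as sf


def play_waveform(audio, sampling_rate, path="parler_tts_out.wav"):
    """Write the generated speech to disk and block until playback finishes."""
    sf.write(path, audio, sampling_rate)   # persist the waveform, as the operator does
    pygame.mixer.music.load(path)          # hand the file to the music stream
    pygame.mixer.music.play()              # playback starts asynchronously
    while pygame.mixer.music.get_busy():   # poll until the utterance has fully played
        time.sleep(0.05)                   # brief sleep instead of a hot spin


if __name__ == "__main__":
    pygame.mixer.init()
    # e.g. play_waveform(generation.cpu().numpy().squeeze(), model.config.sampling_rate)
```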
operators/robot_minimize.py CHANGED

@@ -21,10 +21,10 @@ for dora_event in node:
         [x, y, z, xy_speed, z_speed, pitch, yaw] = dora_event["value"].to_numpy()
         print(dora_event["value"].to_numpy())
         event = ep_robot.gimbal.moveto(
-            pitch=pitch, yaw=yaw, pitch_speed=50.0, yaw_speed=50.0
+            pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0
         )
         wait(event)
-        sleep(4)
+        sleep(2)
         event = ep_robot.chassis.move(x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed)
         wait(event)
         sleep(6)
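The gimbal pitch speed rises from 50 to 60 and the pause after aiming drops from 4 s to 2 s, so the chassis move starts sooner. The `wait()` helper the loop relies on is defined outside this hunk; a plausible minimal reconstruction, assuming the RoboMaster SDK action objects returned by `gimbal.moveto` / `chassis.move` (a guess for illustration, not the commit's code):

```python
def wait(event):
    # Block until the RoboMaster SDK reports the queued action as finished.
    if event is not None:
        event.wait_for_completed()
```

With a helper like this, each motion completes before the next command is issued.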
operators/whisper_op.py CHANGED

@@ -4,14 +4,17 @@ from pynput import keyboard
 from pynput.keyboard import Key
 from dora import DoraStatus
 
+import torch
 import numpy as np
 import pyarrow as pa
 import sounddevice as sd
+import gc  # garbage collect library
 
 model = whisper.load_model("base")
 
 SAMPLE_RATE = 16000
-MAX_DURATION = 20
+MAX_DURATION = 10
+MIN_DURATION = 6
 
 
 class Operator:
@@ -19,6 +22,9 @@ class Operator:
     Transforming Speech to Text using OpenAI Whisper model
     """
 
+    def __init__(self) -> None:
+        self.policy_init = False
+
     def on_event(
         self,
         dora_event,
@@ -30,10 +36,17 @@ class Operator:
         with keyboard.Events() as events:
             event = events.get(1.0)
             if event is not None and event.key == Key.up:
-                send_output("led", pa.array([0, 255, 0]))
+                # send_output("led", pa.array([0, 255, 0]))
+
+                if self.policy_init == False:
+                    self.policy_init = True
+                    duration = MAX_DURATION
+                else:
+                    duration = MIN_DURATION
+
                 ## Microphone
                 audio_data = sd.rec(
-                    int(SAMPLE_RATE * MAX_DURATION),
+                    int(SAMPLE_RATE * duration),
                     samplerate=SAMPLE_RATE,
                     channels=1,
                     dtype=np.int16,
@@ -48,11 +61,9 @@ class Operator:
                 send_output(
                     "text", pa.array([result["text"]]), dora_event["metadata"]
                 )
-                send_output("led", pa.array([0, 0, 255]))
-                del model
-
-                import gc  # garbage collect library
+                # send_output("led", pa.array([0, 0, 255]))
 
                 gc.collect()
+                torch.cuda.empty_cache()
 
         return DoraStatus.CONTINUE
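The operator now records a long first clip (MAX_DURATION = 10 s) and shorter follow-ups (MIN_DURATION = 6 s), tracked by the `policy_init` flag, and frees GPU memory after each transcription with `gc.collect()` plus `torch.cuda.empty_cache()`. The standalone sketch below shows the record-then-transcribe path with that duration policy; the int16-to-float32 conversion is an assumption (the operator's own conversion lies outside the hunks shown), and `record_and_transcribe` is a name introduced here:

```python
import numpy as np
import sounddevice as sd
import whisper

SAMPLE_RATE = 16000
MAX_DURATION = 10  # first capture: give the user time for a full instruction
MIN_DURATION = 6   # follow-up captures are shorter

model = whisper.load_model("base")
first_capture = True


def record_and_transcribe() -> str:
    global first_capture
    duration = MAX_DURATION if first_capture else MIN_DURATION
    first_capture = False
    audio = sd.rec(
        int(SAMPLE_RATE * duration),
        samplerate=SAMPLE_RATE,
        channels=1,
        dtype=np.int16,
    )
    sd.wait()  # block until the recording buffer is filled
    # Whisper expects mono float32 samples in [-1, 1]
    samples = audio.ravel().astype(np.float32) / 32768.0
    return model.transcribe(samples, language="en")["text"]
```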