vaishanthr committed
Commit a62ae31 · Parent: 4737f4c

updated files

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +2 -0
  2. app.py +149 -0
  3. assets/images/img_1.jpg +0 -0
  4. assets/images/img_2.jpg +0 -0
  5. assets/images/img_3.jpg +0 -0
  6. assets/videos/vid_1.mp4 +3 -0
  7. assets/videos/vid_2.mp4 +3 -0
  8. datasets/hand_dataset_bbox/README.dataset.txt +6 -0
  9. datasets/hand_dataset_bbox/README.roboflow.txt +35 -0
  10. datasets/hand_dataset_bbox/data.yaml +12 -0
  11. datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_3_mp4-15_jpg.rf.648c927a529d159315b14692b4823a73.jpg +0 -0
  12. datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_3_mp4-24_jpg.rf.5a2c8df29ac4bd6456b94b45ff252068.jpg +0 -0
  13. datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_3_mp4-8_jpg.rf.b72e8e36f12ff26b13c580e247ea995b.jpg +0 -0
  14. datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_4_mp4-17_jpg.rf.60b53afc4553c743bf21ba016b29b551.jpg +0 -0
  15. datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_4_mp4-33_jpg.rf.9c8b9c92d4e9d64b471cf495a06b5f77.jpg +0 -0
  16. datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_4_mp4-42_jpg.rf.d0ff24f8fa95d380f906159a39009912.jpg +0 -0
  17. datasets/hand_dataset_bbox/test/images/sign_language_interpreter_vid_2_mp4-18_jpg.rf.2b7ded8aacb6746b672598d788785529.jpg +0 -0
  18. datasets/hand_dataset_bbox/test/images/sign_language_interpreter_vid_2_mp4-2_jpg.rf.d068568645cf6bef50ac41567ea666d2.jpg +0 -0
  19. datasets/hand_dataset_bbox/test/images/sign_language_interpreter_vid_2_mp4-6_jpg.rf.bdaa2043a4e6f5dc7845ef7f5b99371b.jpg +0 -0
  20. datasets/hand_dataset_bbox/test/images/sign_language_interpreter_vid_2_mp4-9_jpg.rf.d258d97eb97ad74b58c993a0a6ced074.jpg +0 -0
  21. datasets/hand_dataset_bbox/test/images/sign_language_interpreter_vid_3_mp4-14_jpg.rf.de46e9b06a0ec2903988a625871f65cc.jpg +0 -0
  22. datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_3_mp4-15_jpg.rf.648c927a529d159315b14692b4823a73.txt +1 -0
  23. datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_3_mp4-24_jpg.rf.5a2c8df29ac4bd6456b94b45ff252068.txt +1 -0
  24. datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_3_mp4-8_jpg.rf.b72e8e36f12ff26b13c580e247ea995b.txt +2 -0
  25. datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_4_mp4-17_jpg.rf.60b53afc4553c743bf21ba016b29b551.txt +2 -0
  26. datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_4_mp4-33_jpg.rf.9c8b9c92d4e9d64b471cf495a06b5f77.txt +2 -0
  27. datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_4_mp4-42_jpg.rf.d0ff24f8fa95d380f906159a39009912.txt +1 -0
  28. datasets/hand_dataset_bbox/test/labels/sign_language_interpreter_vid_2_mp4-18_jpg.rf.2b7ded8aacb6746b672598d788785529.txt +2 -0
  29. datasets/hand_dataset_bbox/test/labels/sign_language_interpreter_vid_2_mp4-2_jpg.rf.d068568645cf6bef50ac41567ea666d2.txt +1 -0
  30. datasets/hand_dataset_bbox/test/labels/sign_language_interpreter_vid_2_mp4-6_jpg.rf.bdaa2043a4e6f5dc7845ef7f5b99371b.txt +1 -0
  31. datasets/hand_dataset_bbox/test/labels/sign_language_interpreter_vid_2_mp4-9_jpg.rf.d258d97eb97ad74b58c993a0a6ced074.txt +2 -0
  32. datasets/hand_dataset_bbox/test/labels/sign_language_interpreter_vid_3_mp4-14_jpg.rf.de46e9b06a0ec2903988a625871f65cc.txt +2 -0
  33. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-10_jpg.rf.41dd6aa601e4e3fc8aa625242ed43cfd.jpg +0 -0
  34. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-10_jpg.rf.715609f659ba15fdca150fd2b3dd170f.jpg +0 -0
  35. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-10_jpg.rf.88b413ef3003f9f757b25b527024335c.jpg +0 -0
  36. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-16_jpg.rf.1857f2bde23a7628262a4df8c19a9d92.jpg +0 -0
  37. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-16_jpg.rf.3d3b0d4574213994cf019ed02b377c70.jpg +0 -0
  38. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-16_jpg.rf.ac5c8d9d6fa8e57cb0ca5be835cc8542.jpg +0 -0
  39. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-17_jpg.rf.4faf7d367d49161d2b5c83064e66508e.jpg +0 -0
  40. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-17_jpg.rf.d6777e8932edd0b903ac5d6b0e87b6a8.jpg +0 -0
  41. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-17_jpg.rf.e5740ef33fa6b33b2bf707179ce14df1.jpg +0 -0
  42. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-18_jpg.rf.0f7eb292e6f3d0b6f28b07b33310d152.jpg +0 -0
  43. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-18_jpg.rf.517e55fac656ecfd8e95cb25cf89ea19.jpg +0 -0
  44. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-18_jpg.rf.ea73c893024489b7179f577ca90cdde1.jpg +0 -0
  45. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-19_jpg.rf.13cf46bf5cb899849d5973034036d320.jpg +0 -0
  46. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-19_jpg.rf.4b6ce6ecfd61492e878bc699925794d9.jpg +0 -0
  47. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-19_jpg.rf.70949e3252b673c71c0c46c9ec1b6b8c.jpg +0 -0
  48. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-20_jpg.rf.0daa1675fac8030ab32cd8d94e8e0259.jpg +0 -0
  49. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-20_jpg.rf.465990e294f1cc637bfc25b0d14ba809.jpg +0 -0
  50. datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-20_jpg.rf.b34eb33a9d9d948b2360073364d6a133.jpg +0 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ assets/videos/vid_1.mp4 filter=lfs diff=lfs merge=lfs -text
+ assets/videos/vid_2.mp4 filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,140 @@
+ import os
+
+ import cv2
+ import gradio as gr
+
+ from image_segmenter import ImageSegmenter
+
+ # flag the Cancel button sets to stop video processing
+ CANCEL_PROCESSING = False
+
+ img_seg = ImageSegmenter(model_type="yolov8m-seg-custom")
+
+ def resize(image):
+     """Resize the input array to 480x640 for portrait inputs, 640x480 for landscape."""
+     h, w = image.shape[:2]
+     if h > w:
+         return cv2.resize(image, (480, 640))
+     else:
+         return cv2.resize(image, (640, 480))
+
+ def process_image(image):
+     image = resize(image)
+     prediction, _ = img_seg.predict(image)
+     return prediction
+
+ def process_video(vid_path=None):
+     global CANCEL_PROCESSING
+     CANCEL_PROCESSING = False  # reset so an earlier cancel does not stop this run
+     vid_cap = cv2.VideoCapture(vid_path)
+     while vid_cap.isOpened() and not CANCEL_PROCESSING:
+         ret, frame = vid_cap.read()
+         if not ret:
+             break  # end of stream; isOpened() alone would loop forever here
+         print("Making frame predictions ....")
+         frame = resize(frame)
+         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV reads BGR, Gradio expects RGB
+         prediction, _ = img_seg.predict(frame)
+         yield prediction
+     vid_cap.release()
+
+ def update_segmentation_options(options):
+     img_seg.is_show_bounding_boxes = 'Show Boundary Box' in options
+     img_seg.is_show_segmentation = 'Show Segmentation Region' in options
+     img_seg.is_show_segmentation_boundary = 'Show Segmentation Boundary' in options
+
+ def update_confidence_threshold(thres_val):
+     img_seg.confidence_threshold = thres_val / 100
+
+ def model_selector(model_type):
+     global img_seg  # without this, the selected model never replaced the active one
+     if model_type == "Small - Better performance and less accuracy":
+         yolo_model = "yolov8s_seg_custom"
+     else:
+         # "Medium", "Large" and any fallback all load the medium weights for now
+         yolo_model = "yolov8m-seg-custom"
+     img_seg = ImageSegmenter(model_type=yolo_model)
+
+ def cancel():
+     global CANCEL_PROCESSING  # without this, the flag was only set locally and ignored
+     CANCEL_PROCESSING = True
+
+ if __name__ == "__main__":
+
+     # gradio gui app
+     with gr.Blocks() as my_app:
+
+         # title
+         gr.Markdown("<h1><center>Hand detection and segmentation</center></h1>")
+
+         # tabs
+         with gr.Tab("Image"):
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     img_input = gr.Image()
+                     model_type_img = gr.Dropdown(
+                         ["Small - Better performance and less accuracy",
+                          "Medium - Balanced performance and accuracy",
+                          "Large - Slow performance and high accuracy"],
+                         label="Model Type", value="Medium - Balanced performance and accuracy",
+                         info="Select the inference model before running predictions!")
+                     options_checkbox_img = gr.CheckboxGroup(["Show Boundary Box", "Show Segmentation Region"], label="Options")
+                     conf_thres_img = gr.Slider(1, 100, value=60, label="Confidence Threshold", info="Choose the threshold above which objects should be detected")
+                     submit_btn_img = gr.Button(value="Predict")
+
+                 with gr.Column(scale=2):
+                     with gr.Row():
+                         img_output = gr.Image(height=300, label="Segmentation")
+
+                     gr.Markdown("## Sample Images")
+                     gr.Examples(
+                         examples=[os.path.join(os.path.dirname(__file__), "assets/images/img_1.jpg"),
+                                   os.path.join(os.path.dirname(__file__), "assets/images/img_2.jpg")],
+                         inputs=img_input,
+                         outputs=img_output,
+                         fn=process_image,
+                         cache_examples=True,
+                     )
+
+         with gr.Tab("Video"):
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     vid_input = gr.Video()
+                     model_type_vid = gr.Dropdown(
+                         ["Small - Better performance and less accuracy",
+                          "Medium - Balanced performance and accuracy",
+                          "Large - Slow performance and high accuracy"],
+                         label="Model Type", value="Medium - Balanced performance and accuracy",
+                         info="Select the inference model before running predictions!")
+
+                     options_checkbox_vid = gr.CheckboxGroup(["Show Boundary Box", "Show Segmentation Region"], label="Options")
+                     conf_thres_vid = gr.Slider(1, 100, value=60, label="Confidence Threshold", info="Choose the threshold above which objects should be detected")
+                     with gr.Row():
+                         cancel_btn = gr.Button(value="Cancel")
+                         submit_btn_vid = gr.Button(value="Predict")
+
+                 with gr.Column(scale=2):
+                     with gr.Row():
+                         vid_output = gr.Image(height=300, label="Segmentation")
+
+                     gr.Markdown("## Sample Videos")
+                     gr.Examples(
+                         examples=[os.path.join(os.path.dirname(__file__), "assets/videos/vid_1.mp4"),
+                                   os.path.join(os.path.dirname(__file__), "assets/videos/vid_2.mp4")],
+                         inputs=vid_input,
+                     )
+
+         # image tab logic
+         submit_btn_img.click(process_image, inputs=img_input, outputs=img_output)
+         options_checkbox_img.change(update_segmentation_options, options_checkbox_img, [])
+         conf_thres_img.change(update_confidence_threshold, conf_thres_img, [])
+         model_type_img.change(model_selector, model_type_img, [])
+
+         # video tab logic
+         submit_btn_vid.click(process_video, inputs=vid_input, outputs=vid_output)
+         model_type_vid.change(model_selector, model_type_vid, [])
+         cancel_btn.click(cancel, inputs=[], outputs=[])
+         options_checkbox_vid.change(update_segmentation_options, options_checkbox_vid, [])
+         conf_thres_vid.change(update_confidence_threshold, conf_thres_vid, [])
+
+     my_app.queue(concurrency_count=5, max_size=20).launch(debug=True)
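
app.py drives everything through an ImageSegmenter class imported from a local image_segmenter module, which is not among the 50 files shown in this view. For orientation, here is a minimal sketch of the interface app.py assumes. The attribute and method names (predict, confidence_threshold, is_show_bounding_boxes, is_show_segmentation, is_show_segmentation_boundary) are taken from how app.py uses them; the weights path and the plotting logic are hypothetical.

# Hypothetical sketch of the ImageSegmenter interface app.py relies on.
# Only the attribute and method names come from app.py; the body is a guess.
from ultralytics import YOLO

class ImageSegmenter:
    def __init__(self, model_type="yolov8m-seg-custom"):
        # assumed: model_type maps to a .pt weights file shipped with the Space
        self.model = YOLO(f"models/{model_type}.pt")  # hypothetical path
        self.confidence_threshold = 0.6
        self.is_show_bounding_boxes = False
        self.is_show_segmentation = False
        self.is_show_segmentation_boundary = False

    def predict(self, image):
        # run inference at the current confidence threshold
        results = self.model.predict(image, conf=self.confidence_threshold)
        # draw only the overlays the checkboxes enabled
        annotated = results[0].plot(boxes=self.is_show_bounding_boxes,
                                    masks=self.is_show_segmentation)
        return annotated, results

Whatever the real implementation does, predict must return a pair whose first element is the annotated frame, since app.py unpacks prediction, _ and renders only the first value.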
assets/images/img_1.jpg ADDED
assets/images/img_2.jpg ADDED
assets/images/img_3.jpg ADDED
assets/videos/vid_1.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce4d3c0dc28cea27fdedbc54f9de88a57cf56981ffe3a31984631341375880af
+ size 4663136
assets/videos/vid_2.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d041fda27e5fa33e46260e212fc61ed58c6155dd80299b7b1f71ee99629449a5
+ size 5585030
datasets/hand_dataset_bbox/README.dataset.txt ADDED
@@ -0,0 +1,6 @@
+ # Hand Detector > 2023-07-16 1:39pm
+ https://universe.roboflow.com/vaishanth-ramaraj-uub2b/hand-detector-pjtzx
+
+ Provided by a Roboflow user
+ License: Public Domain
+
datasets/hand_dataset_bbox/README.roboflow.txt ADDED
@@ -0,0 +1,35 @@
+
+ Hand Detector - v3 2023-07-16 1:39pm
+ ==============================
+
+ This dataset was exported via roboflow.com on July 16, 2023 at 5:41 PM GMT
+
+ Roboflow is an end-to-end computer vision platform that helps you
+ * collaborate with your team on computer vision projects
+ * collect & organize images
+ * understand and search unstructured image data
+ * annotate, and create datasets
+ * export, train, and deploy computer vision models
+ * use active learning to improve your dataset over time
+
+ For state-of-the-art Computer Vision training notebooks you can use with this dataset,
+ visit https://github.com/roboflow/notebooks
+
+ To find over 100k other datasets and pre-trained models, visit https://universe.roboflow.com
+
+ The dataset includes 262 images.
+ Hands are annotated in YOLOv8 format.
+
+ The following pre-processing was applied to each image:
+ * Auto-orientation of pixel data (with EXIF-orientation stripping)
+ * Resize to 640x640 (Stretch)
+
+ The following augmentation was applied to create 3 versions of each source image:
+ * Random rotation of between -15 and +15 degrees
+ * Random shear of between -15° and +15° horizontally and -15° and +15° vertically
+ * Salt and pepper noise was applied to 5 percent of pixels
+
+ The following transformations were applied to the bounding boxes of each image:
+ * Random Gaussian blur of between 0 and 0.75 pixels
+
+
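
The preprocessing and augmentation above were applied by Roboflow at export time, so the images in this repository already contain them. Purely as an illustration of the salt-and-pepper step described above (5 percent of pixels), here is a small numpy sketch; it is not Roboflow's actual implementation.

import numpy as np

def salt_and_pepper(image, amount=0.05, seed=None):
    """Set a random `amount` fraction of pixels to pure black or white."""
    rng = np.random.default_rng(seed)
    noisy = image.copy()
    corrupt = rng.random(image.shape[:2]) < amount   # which pixels to hit
    salt = rng.random(image.shape[:2]) < 0.5         # split hits ~50/50 salt vs pepper
    noisy[corrupt & salt] = 255
    noisy[corrupt & ~salt] = 0
    return noisy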
datasets/hand_dataset_bbox/data.yaml ADDED
@@ -0,0 +1,12 @@
+ names:
+ - hands
+ nc: 1
+ roboflow:
+   license: Public Domain
+   project: hand-detector-pjtzx
+   url: https://universe.roboflow.com/vaishanth-ramaraj-uub2b/hand-detector-pjtzx/dataset/3
+   version: 3
+   workspace: vaishanth-ramaraj-uub2b
+ test: ../test/images
+ train: /content/drive/MyDrive/My Projects/yolo_custom_training/Hand-Detector-3/train/images
+ val: /content/drive/MyDrive/My Projects/yolo_custom_training/Hand-Detector-3/valid/images
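
Note that train and val still point at absolute Google Drive paths from the original Colab training run, while test is relative; retraining from this repository requires repointing both at local copies. With that done, a typical ultralytics training call against this file would look like the sketch below. The checkpoint, epoch count, and image size are illustrative, not the values used for this Space; a detection checkpoint is chosen because the labels here are bounding boxes.

from ultralytics import YOLO

# Illustrative retraining run; assumes train/val in data.yaml have been
# repointed at local dataset copies.
model = YOLO("yolov8m.pt")  # pretrained YOLOv8 detection weights
model.train(
    data="datasets/hand_dataset_bbox/data.yaml",
    epochs=100,             # illustrative
    imgsz=640,              # matches the 640x640 resize noted in README.roboflow.txt
)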
datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_3_mp4-15_jpg.rf.648c927a529d159315b14692b4823a73.jpg ADDED
datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_3_mp4-24_jpg.rf.5a2c8df29ac4bd6456b94b45ff252068.jpg ADDED
datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_3_mp4-8_jpg.rf.b72e8e36f12ff26b13c580e247ea995b.jpg ADDED
datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_4_mp4-17_jpg.rf.60b53afc4553c743bf21ba016b29b551.jpg ADDED
datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_4_mp4-33_jpg.rf.9c8b9c92d4e9d64b471cf495a06b5f77.jpg ADDED
datasets/hand_dataset_bbox/test/images/Sign_language_interpreter_vid_4_mp4-42_jpg.rf.d0ff24f8fa95d380f906159a39009912.jpg ADDED
datasets/hand_dataset_bbox/test/images/sign_language_interpreter_vid_2_mp4-18_jpg.rf.2b7ded8aacb6746b672598d788785529.jpg ADDED
datasets/hand_dataset_bbox/test/images/sign_language_interpreter_vid_2_mp4-2_jpg.rf.d068568645cf6bef50ac41567ea666d2.jpg ADDED
datasets/hand_dataset_bbox/test/images/sign_language_interpreter_vid_2_mp4-6_jpg.rf.bdaa2043a4e6f5dc7845ef7f5b99371b.jpg ADDED
datasets/hand_dataset_bbox/test/images/sign_language_interpreter_vid_2_mp4-9_jpg.rf.d258d97eb97ad74b58c993a0a6ced074.jpg ADDED
datasets/hand_dataset_bbox/test/images/sign_language_interpreter_vid_3_mp4-14_jpg.rf.de46e9b06a0ec2903988a625871f65cc.jpg ADDED
datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_3_mp4-15_jpg.rf.648c927a529d159315b14692b4823a73.txt ADDED
@@ -0,0 +1 @@
+ 0 0.37109375 0.79453125 0.128125 0.1625
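
Each label file holds one line per annotated hand in YOLO format: class_id x_center y_center width height, all normalized to [0, 1] relative to the image size. A small sketch converting such a line back to pixel corner coordinates, assuming the 640x640 images this export produces:

def yolo_to_pixels(line, img_w=640, img_h=640):
    """Convert one YOLO label line to (class_id, x1, y1, x2, y2) in pixels."""
    cls, xc, yc, w, h = line.split()
    xc, yc = float(xc) * img_w, float(yc) * img_h  # box center in pixels
    w, h = float(w) * img_w, float(h) * img_h      # box size in pixels
    return int(cls), xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2

# e.g. the label above: class 0, centered near the lower left of the frame
print(yolo_to_pixels("0 0.37109375 0.79453125 0.128125 0.1625"))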
datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_3_mp4-24_jpg.rf.5a2c8df29ac4bd6456b94b45ff252068.txt ADDED
@@ -0,0 +1 @@
+ 0 0.378125 0.73203125 0.11796875 0.1578125
datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_3_mp4-8_jpg.rf.b72e8e36f12ff26b13c580e247ea995b.txt ADDED
@@ -0,0 +1,2 @@
+ 0 0.32421875 0.44921875 0.0953125 0.16796875
+ 0 0.3859375 0.63984375 0.11875 0.115625
datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_4_mp4-17_jpg.rf.60b53afc4553c743bf21ba016b29b551.txt ADDED
@@ -0,0 +1,2 @@
+ 0 0.4921875 0.409375 0.10078125 0.2375
+ 0 0.5078125 0.559375 0.1203125 0.178125
datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_4_mp4-33_jpg.rf.9c8b9c92d4e9d64b471cf495a06b5f77.txt ADDED
@@ -0,0 +1,2 @@
+ 0 0.48203125 0.39140625 0.06484375 0.296875
+ 0 0.55625 0.3609375 0.159375 0.21015625
datasets/hand_dataset_bbox/test/labels/Sign_language_interpreter_vid_4_mp4-42_jpg.rf.d0ff24f8fa95d380f906159a39009912.txt ADDED
@@ -0,0 +1 @@
+ 0 0.49453125 0.21796875 0.1390625 0.28671875
datasets/hand_dataset_bbox/test/labels/sign_language_interpreter_vid_2_mp4-18_jpg.rf.2b7ded8aacb6746b672598d788785529.txt ADDED
@@ -0,0 +1,2 @@
+ 0 0.62265625 0.49453125 0.1796875 0.26875
+ 0 0.740625 0.85859375 0.16796875 0.1796875
datasets/hand_dataset_bbox/test/labels/sign_language_interpreter_vid_2_mp4-2_jpg.rf.d068568645cf6bef50ac41567ea666d2.txt ADDED
@@ -0,0 +1 @@
+ 0 0.5046875 0.3578125 0.15234375 0.2375
datasets/hand_dataset_bbox/test/labels/sign_language_interpreter_vid_2_mp4-6_jpg.rf.bdaa2043a4e6f5dc7845ef7f5b99371b.txt ADDED
@@ -0,0 +1 @@
+ 0 0.49921875 0.52578125 0.12265625 0.2265625
datasets/hand_dataset_bbox/test/labels/sign_language_interpreter_vid_2_mp4-9_jpg.rf.d258d97eb97ad74b58c993a0a6ced074.txt ADDED
@@ -0,0 +1,2 @@
+ 0 0.60703125 0.584375 0.19765625 0.2578125
+ 0 0.596875 0.8796875 0.20234375 0.22578125
datasets/hand_dataset_bbox/test/labels/sign_language_interpreter_vid_3_mp4-14_jpg.rf.de46e9b06a0ec2903988a625871f65cc.txt ADDED
@@ -0,0 +1,2 @@
+ 0 0.61875 0.34453125 0.1421875 0.21171875
+ 0 0.4828125 0.37265625 0.15703125 0.184375
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-10_jpg.rf.41dd6aa601e4e3fc8aa625242ed43cfd.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-10_jpg.rf.715609f659ba15fdca150fd2b3dd170f.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-10_jpg.rf.88b413ef3003f9f757b25b527024335c.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-16_jpg.rf.1857f2bde23a7628262a4df8c19a9d92.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-16_jpg.rf.3d3b0d4574213994cf019ed02b377c70.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-16_jpg.rf.ac5c8d9d6fa8e57cb0ca5be835cc8542.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-17_jpg.rf.4faf7d367d49161d2b5c83064e66508e.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-17_jpg.rf.d6777e8932edd0b903ac5d6b0e87b6a8.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-17_jpg.rf.e5740ef33fa6b33b2bf707179ce14df1.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-18_jpg.rf.0f7eb292e6f3d0b6f28b07b33310d152.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-18_jpg.rf.517e55fac656ecfd8e95cb25cf89ea19.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-18_jpg.rf.ea73c893024489b7179f577ca90cdde1.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-19_jpg.rf.13cf46bf5cb899849d5973034036d320.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-19_jpg.rf.4b6ce6ecfd61492e878bc699925794d9.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-19_jpg.rf.70949e3252b673c71c0c46c9ec1b6b8c.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-20_jpg.rf.0daa1675fac8030ab32cd8d94e8e0259.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-20_jpg.rf.465990e294f1cc637bfc25b0d14ba809.jpg ADDED
datasets/hand_dataset_bbox/train/images/Sign_language_interpreter_vid_3_mp4-20_jpg.rf.b34eb33a9d9d948b2360073364d6a133.jpg ADDED