GordonM commited on
Commit
9f2b3ad
·
1 Parent(s): f086f4e

add models

Browse files
.DS_Store ADDED
Binary file (10.2 kB). View file
 
.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
  *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
  *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
28
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
  title: BandiCount
3
- emoji: 📚
4
- colorFrom: red
5
- colorTo: red
6
  sdk: gradio
7
  sdk_version: 3.0.5
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
 
 
1
  ---
2
  title: BandiCount
3
+ emoji: 🐨
 
 
4
  sdk: gradio
5
  sdk_version: 3.0.5
6
  app_file: app.py
7
  pinned: false
8
  ---
9
 
10
+ # BandiCount
11
+
12
+ State-of-the-art object-detection model for detecting Australian native animal species in NSW national parks.
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
import torchvision
import numpy as np
from PIL import Image

# Load custom YOLOv5 weights via torch.hub (downloads the ultralytics/yolov5
# repo code on first run; weights file must exist locally).
model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/datasets_1000_41class.pt")


def yolo(im, size=640):
    """Run YOLOv5 inference on a PIL image and return the annotated image.

    Args:
        im: input PIL.Image to run detection on.
        size: target size (pixels) for the longest image side before inference.

    Returns:
        PIL.Image with detection boxes and labels rendered onto it.
    """
    # Scale so the longest side equals `size`, preserving aspect ratio.
    g = size / max(im.size)  # gain
    # resize() requires a length-2 sequence — a generator raises TypeError,
    # so build a tuple. Image.LANCZOS is the stable alias for the removed
    # Image.ANTIALIAS (deprecated and dropped in Pillow 10).
    im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)

    results = model(im)  # inference
    results.render()  # updates results.imgs in place with boxes and labels
    return Image.fromarray(results.imgs[0])


inputs = gr.inputs.Image(type='pil', label="Original Image")
outputs = gr.outputs.Image(type="pil", label="Output Image")

title = "BandiCount: Detecting Australian native animal species"
description = "BandiCount: Detecting Australian native animal species in NSW national parks, using object detection. Upload an image or click an example image to use."
article = ""

examples = [['BrushtailPossum.jpg'], ['Eagle.jpg'], ['Macropod.jpg'], ['cat.jpg'], ['echidna.gif'], ['fox_in_snow.mp4'], ['godzilla_fantail.png'], ['ibis.jpg'], ['koala1.jpeg'], ['koala2.jpg'], ['lyrebird.mp4']]
gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(cache_examples=True, enable_queue=True)
content/.DS_Store ADDED
Binary file (6.15 kB). View file
 
content/README.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Content
2
+
3
+ Figures and pictures for the documentation go here.
data/.DS_Store ADDED
Binary file (6.15 kB). View file
 
data/BrushtailPossum.jpg ADDED
data/Eagle.jpg ADDED
data/Macropod.jpg ADDED
data/README.md ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Data
2
+
3
+ Data goes here. However, I don't own the data, so I am not going to stick it on github.
4
+
5
+ Instead, let's go get the data!
6
+
7
+ Download it from [kaggle](https://www.kaggle.com/datasets/aditya276/face-mask-dataset-yolo-format)
8
+
9
+ Then, in terminal:
10
+
11
+ ```bash
12
+ unzip archive.zip
13
+ ```
14
+
15
+ Next, we will need to make sure the files are ordered in the way that YOLO likes.
16
+
17
+ ```bash
18
+ mkdir images/test/labels
19
+ mkdir images/train/labels
20
+ mkdir images/valid/labels
21
+ mkdir images/test/images
22
+ mkdir images/train/images
23
+ mkdir images/valid/images
24
+
25
+ mv images/test/*.txt images/test/labels
26
+ mv images/train/*.txt images/train/labels
27
+ mv images/valid/*.txt images/valid/labels
28
+
29
+ mv images/test/* images/test/images
30
+ mv images/train/* images/train/images
31
+ mv images/valid/* images/valid/images
32
+ ```
33
+
34
+ ## Another face mask dataset
35
+
36
+ [This one looks better than kaggle](https://mvrigkas.github.io/FaceMaskDataset/)
data/cat.jpg ADDED
data/echidna.gif ADDED
data/fox_in_snow.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da7e02104cdc5d2a2efaa8a2211d1ec772c0396037de430d8a5a8c718ee3b986
3
+ size 5656379
data/godzilla_fantail.png ADDED
data/ibis.jpg ADDED
data/koala1.jpeg ADDED
data/koala2.jpg ADDED
data/lyrebird.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3075ea75131a5502f658d9ac200e91e225d265cbb939c4b157165732e85f167
3
+ size 2365182
model_weights/.DS_Store ADDED
Binary file (6.15 kB). View file
 
model_weights/72class_yolov5l.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6a1c7f1b928e2ea56122144cb367572bb1ae9dab4cd32b60d1d090e4c8c143c
3
+ size 93589989
model_weights/datasets_1000_41class.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0089587b047328825727c30c307dfd97e830ee4c8dca6bc97dbfdafce7619f3a
3
+ size 57647815
model_weights/datasets_150_72class.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b2ac6ed5895d63b09c3f229ff4e5f7227acea08ffb933fb38a73c8b8279e5f4b
3
+ size 14786997
requirements.txt ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ # pip install -r requirements.txt
4
+
5
+ # Base ----------------------------------------
6
+ matplotlib>=3.2.2
7
+ numpy>=1.18.5
8
+ opencv-python>=4.1.1
9
+ Pillow>=7.1.2
10
+ PyYAML>=5.3.1
11
+ requests>=2.23.0
12
+ scipy>=1.4.1 # Google Colab version
13
+ torch>=1.7.0
14
+ torchvision>=0.8.1
15
+ tqdm>=4.41.0
16
+
17
+ # Logging -------------------------------------
18
+ tensorboard>=2.4.1
19
+ # wandb
20
+
21
+ # Plotting ------------------------------------
22
+ pandas>=1.1.4
23
+ seaborn>=0.11.0
24
+
25
+ # Export --------------------------------------
26
+ # coremltools>=4.1 # CoreML export
27
+ # onnx>=1.9.0 # ONNX export
28
+ # onnx-simplifier>=0.3.6 # ONNX simplifier
29
+ # scikit-learn==0.19.2 # CoreML quantization
30
+ # tensorflow>=2.4.1 # TFLite export
31
+ # tensorflowjs>=3.9.0 # TF.js export
32
+ # openvino-dev # OpenVINO export
33
+
34
+ # Extras --------------------------------------
35
+ # albumentations>=1.0.3
36
+ # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
37
+ # pycocotools>=2.0 # COCO mAP
38
+ # roboflow
39
+ thop # FLOPs computation