songhieng committed
Commit 354940a · verified · 1 Parent(s): e8330ee

Upload 195 files

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +5 -0
  2. Dockerfile +20 -0
  3. README.md +11 -12
  4. app.py +213 -0
  5. deepface/.gitattributes +1 -0
  6. deepface/.github/FUNDING.yml +3 -0
  7. deepface/.github/ISSUE_TEMPLATE/01-report-bug.yaml +86 -0
  8. deepface/.github/ISSUE_TEMPLATE/02-request-feature.yaml +18 -0
  9. deepface/.github/ISSUE_TEMPLATE/03-documentation.yaml +18 -0
  10. deepface/.github/ISSUE_TEMPLATE/config.yml +5 -0
  11. deepface/.github/pull_request_template.md +13 -0
  12. deepface/.github/workflows/tests.yml +69 -0
  13. deepface/.gitignore +20 -0
  14. deepface/.pylintrc +641 -0
  15. deepface/.vscode/settings.json +18 -0
  16. deepface/CITATION.md +71 -0
  17. deepface/Dockerfile +58 -0
  18. deepface/LICENSE +21 -0
  19. deepface/Makefile +8 -0
  20. deepface/README.md +461 -0
  21. deepface/benchmarks/Evaluate-Results.ipynb +0 -0
  22. deepface/benchmarks/Perform-Experiments.ipynb +352 -0
  23. deepface/benchmarks/README.md +134 -0
  24. deepface/deepface/DeepFace.py +615 -0
  25. deepface/deepface/__init__.py +1 -0
  26. deepface/deepface/api/__init__.py +0 -0
  27. deepface/deepface/api/postman/deepface-api.postman_collection.json +102 -0
  28. deepface/deepface/api/src/__init__.py +0 -0
  29. deepface/deepface/api/src/api.py +9 -0
  30. deepface/deepface/api/src/app.py +18 -0
  31. deepface/deepface/api/src/modules/__init__.py +0 -0
  32. deepface/deepface/api/src/modules/core/__init__.py +0 -0
  33. deepface/deepface/api/src/modules/core/routes.py +96 -0
  34. deepface/deepface/api/src/modules/core/service.py +88 -0
  35. deepface/deepface/commons/__init__.py +0 -0
  36. deepface/deepface/commons/constant.py +4 -0
  37. deepface/deepface/commons/folder_utils.py +34 -0
  38. deepface/deepface/commons/image_utils.py +148 -0
  39. deepface/deepface/commons/logger.py +57 -0
  40. deepface/deepface/commons/package_utils.py +65 -0
  41. deepface/deepface/commons/weight_utils.py +92 -0
  42. deepface/deepface/models/Demography.py +22 -0
  43. deepface/deepface/models/Detector.py +69 -0
  44. deepface/deepface/models/FacialRecognition.py +29 -0
  45. deepface/deepface/models/__init__.py +0 -0
  46. deepface/deepface/models/demography/Age.py +89 -0
  47. deepface/deepface/models/demography/Emotion.py +103 -0
  48. deepface/deepface/models/demography/Gender.py +79 -0
  49. deepface/deepface/models/demography/Race.py +76 -0
  50. deepface/deepface/models/demography/__init__.py +0 -0
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ deepface/tests/dataset/img1.jpg filter=lfs diff=lfs merge=lfs -text
+ deepface/tests/dataset/img26.jpg filter=lfs diff=lfs merge=lfs -text
+ deepface/tests/dataset/img33.jpg filter=lfs diff=lfs merge=lfs -text
+ deepface/tests/dataset/img35.jpg filter=lfs diff=lfs merge=lfs -text
+ deepface/tests/dataset/img40.jpg filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,20 @@
+ # Use an official Python runtime as a parent image
+ FROM python:3.9-slim
+
+ # Set the working directory in the container
+ WORKDIR /app
+
+ # Copy the requirements file into the container at /app
+ COPY requirements.txt /app/requirements.txt
+
+ # Install any needed packages specified in requirements.txt
+ RUN pip install --no-cache-dir -r /app/requirements.txt
+
+ # Copy the rest of the application code to /app
+ COPY . /app
+
+ # Expose port 8000 to the outside world
+ EXPOSE 8000
+
+ # Command to run the FastAPI application using Uvicorn (the app instance lives in app.py, hence "app:app")
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
README.md CHANGED
@@ -1,12 +1,11 @@
- ---
- title: FV V6
- emoji: 📊
- colorFrom: pink
- colorTo: gray
- sdk: gradio
- sdk_version: 4.42.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: FV V4
+ emoji: 📈
+ colorFrom: red
+ colorTo: indigo
+ sdk: docker
+ pinned: false
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,213 @@
+ from fastapi import FastAPI, File, UploadFile, HTTPException
+ from fastapi.responses import JSONResponse
+ import pickle
+ import numpy as np
+ import face_recognition
+ from PIL import Image
+ import io
+ from mtcnn import MTCNN
+ import cv2
+ import faiss
+ import os
+ import imgaug.augmenters as iaa
+ from deepface import DeepFace
+
+ app = FastAPI()
+
+ # Load encodings from the file
+ def load_encodings(file_path):
+     if not os.path.exists(file_path):
+         return np.array([]), []
+     with open(file_path, "rb") as file:
+         data = pickle.load(file)
+     return np.array(data["encodings"]), data["labels"]
+
+ # Save encodings to the file
+ def save_encodings(encodings, labels, file_path):
+     data = {"encodings": encodings, "labels": labels}
+     with open(file_path, "wb") as file:
+         pickle.dump(data, file)
+
+ # Detect and align face
+ def detect_and_align_face(image):
+     detector = MTCNN()  # Initialize the MTCNN face detector
+     image_rgb = np.array(image)  # PIL images are already in RGB order, so no color conversion is needed
+     detections = detector.detect_faces(image_rgb)  # Detect faces in the image
+
+     if len(detections) == 0:
+         raise ValueError("No face detected in the image.")
+
+     detection = detections[0]  # Assume the first detected face
+     x, y, width, height = detection['box']  # Get the bounding box of the face
+     keypoints = detection['keypoints']  # Get facial keypoints (eyes, nose, mouth)
+
+     # Calculate the angle to align the face based on eye positions
+     left_eye = keypoints['left_eye']
+     right_eye = keypoints['right_eye']
+     delta_x = right_eye[0] - left_eye[0]
+     delta_y = right_eye[1] - left_eye[1]
+     angle = np.arctan2(delta_y, delta_x) * (180.0 / np.pi)
+
+     # Compute the center of the face and create a rotation matrix
+     center = ((x + x + width) // 2, (y + y + height) // 2)
+     rot_matrix = cv2.getRotationMatrix2D(center, angle, scale=1.0)
+
+     # Rotate the image to align the face
+     aligned_image = cv2.warpAffine(image_rgb, rot_matrix, (image_rgb.shape[1], image_rgb.shape[0]))
+     aligned_face = aligned_image[y:y + height, x:x + width]  # Extract the aligned face
+
+     return Image.fromarray(aligned_face)  # Convert to PIL Image format and return
+
+ # Create FAISS index
+ def create_faiss_index(known_encodings):
+     dimension = known_encodings.shape[1]  # Get the dimensionality of the encodings
+     index = faiss.IndexFlatL2(dimension)  # Create a FAISS index using L2 distance
+     index.add(known_encodings.astype(np.float32))  # Add known encodings (FAISS expects float32 vectors)
+     return index  # Return the FAISS index
+
+ # Augment image function
+ def augment_image(image, num_augmented=5):
+     """
+     Apply data augmentation to an image.
+
+     Parameters:
+         image (PIL.Image): The image to augment.
+         num_augmented (int): Number of augmented images to generate.
+
+     Returns:
+         List[PIL.Image]: List of augmented images.
+     """
+     image = np.array(image)
+
+     # Define a sequence of augmentation techniques
+     aug = iaa.Sequential([
+         iaa.Fliplr(0.5),  # horizontal flips
+         iaa.Affine(rotate=(-25, 25)),  # rotation
+         iaa.AdditiveGaussianNoise(scale=(0, 0.05 * 255)),  # noise
+         iaa.Multiply((0.8, 1.2)),  # brightness
+         iaa.GaussianBlur(sigma=(0.0, 1.0))  # blur
+     ])
+
+     # Generate augmented images
+     augmented_images = [Image.fromarray(aug(image=image)) for _ in range(num_augmented)]
+     return augmented_images
+
+ # Endpoint to process and save augmented encodings
+ @app.post("/create/")
+ async def preprocess_and_save_augmented_encodings(image: UploadFile = File(...), num_augmented: int = 5):
+     known_encodings = []
+     known_labels = []
+
+     # Load the uploaded image
+     image_bytes = await image.read()
+     original_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")  # Ensure the image is in RGB format
+
+     # Augment the image
+     augmented_images = augment_image(original_image, num_augmented=num_augmented)
+
+     # Include the original image in the list of images to encode
+     images_to_encode = [original_image] + augmented_images
+
+     for img in images_to_encode:
+         img_array = np.array(img)
+         # Encode the face
+         encodings = face_recognition.face_encodings(img_array)
+         if encodings:
+             encoding = encodings[0]
+             # Store the encoding and the corresponding label
+             known_encodings.append(encoding)
+             known_labels.append(image.filename)  # Use the uploaded image filename as the label
+
+     # Save encodings and labels to a file (this overwrites any previously stored encodings)
+     encodings_file = "face_encoding.pkl"
+     save_encodings(np.array(known_encodings), known_labels, encodings_file)
+
+     return JSONResponse(content={"status": "Success", "message": "Augmented encodings created and saved."})
+
+ @app.post("/encode/")
+ async def encode_face(image: UploadFile = File(...)):
+     # Load the image from the uploaded file
+     image_bytes = await image.read()
+     pil_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+
+     # Align the face
+     try:
+         aligned_face = detect_and_align_face(pil_image)
+     except ValueError as e:
+         raise HTTPException(status_code=400, detail=str(e))
+
+     # Load existing encodings
+     encodings_file = "face_encoding.pkl"
+     known_encodings, known_labels = load_encodings(encodings_file)
+
+     # Encode the face
+     encodings = face_recognition.face_encodings(np.array(aligned_face))
+     if not encodings:
+         raise HTTPException(status_code=400, detail="No face encoding found.")
+
+     # Append the new encoding and label
+     known_encodings = list(known_encodings)
+     known_encodings.append(encodings[0])
+     known_labels.append(image.filename)
+
+     # Save the updated encodings
+     save_encodings(np.array(known_encodings), known_labels, encodings_file)
+
+     return JSONResponse(content={"status": "Success", "message": "Face encoded and saved."})
+
+ @app.post("/match/")
+ async def match_face(image: UploadFile = File(...), similarity_threshold: float = 70.0):
+     # Load the image from the uploaded file
+     image_bytes = await image.read()
+     pil_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+
+     # Align the face
+     try:
+         aligned_face = detect_and_align_face(pil_image)
+     except ValueError as e:
+         raise HTTPException(status_code=400, detail=str(e))
+
+     # Load existing encodings
+     encodings_file = "face_encoding.pkl"
+     known_encodings, known_labels = load_encodings(encodings_file)
+
+     if len(known_encodings) == 0:
+         raise HTTPException(status_code=400, detail="No known faces in the database. Please add some faces first.")
+
+     # Encode the face
+     target_encodings = face_recognition.face_encodings(np.array(aligned_face))
+     if not target_encodings:
+         raise HTTPException(status_code=400, detail="No face encoding found.")
+
+     target_encoding = target_encodings[0].reshape(1, -1).astype(np.float32)  # FAISS expects float32 queries
+
+     # Create FAISS index and search for the best match
+     index = create_faiss_index(np.array(known_encodings))
+     distances, indices = index.search(target_encoding, 1)
+
+     best_match_index = indices[0][0]  # Index of the closest stored encoding (its label: known_labels[best_match_index])
+     best_similarity_percentage = (1 - distances[0][0]) * 100  # Convert the L2 distance into a rough similarity percentage
+
+     # Default to True (real) for spoof detection
+     is_real = True
+
+     # Perform face spoof detection using DeepFace
+     try:
+         # Pass the decoded image as a BGR array; the uploaded file never exists on disk
+         result = DeepFace.extract_faces(img_path=cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR), anti_spoofing=True)
+         if result and isinstance(result, list):
+             is_real = result[0].get('is_real', True)
+     except Exception:
+         # Log the exception if necessary, but do not interrupt the program
+         is_real = False  # Conservative approach if spoof detection fails
+
+     return JSONResponse(content={
+         "status": "Success",
+         "similarity": f"{best_similarity_percentage:.2f}%",
+         "is_real": is_real,
+         "message": "Face matched successfully" if best_similarity_percentage >= similarity_threshold else "Face not matched"
+     })
+
+ if __name__ == '__main__':
+     import uvicorn
+     uvicorn.run(app, host='0.0.0.0', port=8000)
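
With the service running, the three endpoints can be exercised with curl (a hedged sketch; `face.jpg` and `probe.jpg` are placeholder files, and `localhost:8000` assumes the uvicorn settings above):

```shell
# Register a face: stores encodings for the original image plus 5 augmented variants
curl -X POST "http://localhost:8000/create/?num_augmented=5" -F "image=@face.jpg"

# Append one more encoding to the existing database
curl -X POST "http://localhost:8000/encode/" -F "image=@face.jpg"

# Match a probe image against the stored encodings (threshold in percent)
curl -X POST "http://localhost:8000/match/?similarity_threshold=70" -F "image=@probe.jpg"
```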
deepface/.gitattributes ADDED
@@ -0,0 +1 @@
+ *.ipynb linguist-vendored
deepface/.github/FUNDING.yml ADDED
@@ -0,0 +1,3 @@
+ github: serengil
+ patreon: serengil
+ buy_me_a_coffee: serengil
deepface/.github/ISSUE_TEMPLATE/01-report-bug.yaml ADDED
@@ -0,0 +1,86 @@
+ name: '🐛 Report a bug'
+ description: 'Use this template to report DeepFace related issues'
+ title: '[BUG]: <short description of the issue>'
+ labels:
+   - bug
+ body:
+   - type: checkboxes
+     id: preliminary-checks
+     attributes:
+       label: Before You Report a Bug, Please Confirm You Have Done The Following...
+       description: If any of these required steps are not taken, we may not be able to review your issue. Help us to help you!
+       options:
+         - label: I have updated to the latest version of the packages.
+           required: true
+         - label: I have searched for both [existing issues](https://github.com/serengil/deepface/issues) and [closed issues](https://github.com/serengil/deepface/issues?q=is%3Aissue+is%3Aclosed) and found none that matched my issue.
+           required: true
+   - type: input
+     id: deepface-version
+     attributes:
+       label: DeepFace's version
+       description: |
+         Please provide your deepface version by running `python -c "import deepface; print(deepface.__version__)"` in your terminal
+       placeholder: e.g. v0.0.90
+     validations:
+       required: true
+   - type: input
+     id: python-version
+     attributes:
+       label: Python version
+       description: |
+         Please provide your Python version by running `python --version` in your terminal
+       placeholder: e.g. 3.8.5
+     validations:
+       required: true
+   - type: input
+     id: os
+     attributes:
+       label: Operating System
+       description: |
+         Please provide your operating system's details
+       placeholder: e.g. Windows 10 or Ubuntu 20.04
+     validations:
+       required: false
+   - type: textarea
+     id: dependencies
+     attributes:
+       label: Dependencies
+       description: |
+         Please provide your Python dependencies by running `pip freeze` in your terminal, in particular tensorflow's and keras' versions
+     validations:
+       required: true
+   - type: textarea
+     id: repro-code
+     attributes:
+       label: Reproducible example
+       description: A ***minimal*** code sample which reproduces the issue
+       render: Python
+     validations:
+       required: true
+   - type: textarea
+     id: exception-message
+     attributes:
+       label: Relevant Log Output
+       description: Please share the exception message from your terminal if your program is failing
+     validations:
+       required: false
+   - type: textarea
+     id: expected
+     attributes:
+       label: Expected Result
+       description: What did you expect to happen?
+     validations:
+       required: false
+   - type: textarea
+     id: actual
+     attributes:
+       label: What happened instead?
+       description: What actually happened?
+     validations:
+       required: false
+   - type: textarea
+     id: additional
+     attributes:
+       label: Additional Info
+       description: |
+         Any additional info you'd like to provide.
deepface/.github/ISSUE_TEMPLATE/02-request-feature.yaml ADDED
@@ -0,0 +1,18 @@
+ name: '✨ Request a New Feature'
+ description: 'Use this template to propose a new feature'
+ title: '[FEATURE]: <a short description of my proposal>'
+ labels:
+   - 'enhancement'
+ body:
+   - type: textarea
+     id: description
+     attributes:
+       label: Description
+       description: Explain what your proposed feature would do and why this is useful.
+     validations:
+       required: true
+   - type: textarea
+     id: additional
+     attributes:
+       label: Additional Info
+       description: Any additional info you'd like to provide.
deepface/.github/ISSUE_TEMPLATE/03-documentation.yaml ADDED
@@ -0,0 +1,18 @@
+ name: '📝 Documentation'
+ description: 'Use this template to add or improve docs'
+ title: '[DOC]: <a short description of my proposal>'
+ labels:
+   - documentation
+ body:
+   - type: textarea
+     attributes:
+       label: Suggested Changes
+       description: What would you like to see happen in the docs?
+     validations:
+       required: true
+   - type: textarea
+     id: additional
+     attributes:
+       label: Additional Info
+       description: |
+         Any additional info you'd like to provide.
deepface/.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,5 @@
+ blank_issues_enabled: false
+ contact_links:
+   - name: Ask a question on StackOverflow
+     about: If you just want to ask a question, consider asking it on StackOverflow!
+     url: https://stackoverflow.com/search?tab=newest&q=deepface
deepface/.github/pull_request_template.md ADDED
@@ -0,0 +1,13 @@
+ ## Tickets
+
+ https://github.com/serengil/deepface/issues/XXX
+
+ ### What has been done
+
+ With this PR, ...
+
+ ## How to test
+
+ ```shell
+ make lint && make test
+ ```
deepface/.github/workflows/tests.yml ADDED
@@ -0,0 +1,69 @@
+ name: Tests and Linting
+
+ on:
+   push:
+     paths:
+       - '.github/workflows/tests.yml'
+       - 'deepface/**'
+       - 'tests/**'
+       - 'api/**'
+       - 'requirements.txt'
+       - '.gitignore'
+       - 'setup.py'
+   pull_request:
+     paths:
+       - '.github/workflows/tests.yml'
+       - 'deepface/**'
+       - 'tests/**'
+       - 'api/**'
+       - 'requirements.txt'
+       - '.gitignore'
+       - 'setup.py'
+
+ jobs:
+   unit-tests:
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: [3.8]
+
+     steps:
+       - uses: actions/checkout@v4
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v5
+         with:
+           python-version: ${{ matrix.python-version }}
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install pytest
+           pip install .
+
+       - name: Test with pytest
+         run: |
+           cd tests
+           python -m pytest . -s --disable-warnings
+   linting:
+     needs: unit-tests
+
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: [3.8]
+
+     steps:
+       - uses: actions/checkout@v4
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v5
+         with:
+           python-version: ${{ matrix.python-version }}
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install pylint==3.0.2
+           pip install black
+           pip install .
+
+       - name: Lint with pylint
+         run: |
+           pylint --fail-under=10 deepface/
deepface/.gitignore ADDED
@@ -0,0 +1,20 @@
+ **/__pycache__
+ **/.DS_Store
+ build/
+ dist/
+ Pipfile
+ Pipfile.lock
+ .mypy_cache/
+ .idea/
+ deepface.egg-info/
+ tests/dataset/*.pkl
+ tests/*.ipynb
+ tests/*.csv
+ *.pyc
+ **/.coverage
+ **/.coverage.*
+ benchmarks/results
+ benchmarks/outputs
+ benchmarks/dataset
+ benchmarks/lfwe
+ venv
deepface/.pylintrc ADDED
@@ -0,0 +1,641 @@
+ [MAIN]
+
+ # Analyse import fallback blocks. This can be used to support both Python 2 and
+ # 3 compatible code, which means that the block might have code that exists
+ # only in one or another interpreter, leading to false positives when analysed.
+ analyse-fallback-blocks=no
+
+ # Load and enable all available extensions. Use --list-extensions to see a list
+ # all available extensions.
+ #enable-all-extensions=
+
+ # In error mode, messages with a category besides ERROR or FATAL are
+ # suppressed, and no reports are done by default. Error mode is compatible with
+ # disabling specific errors.
+ #errors-only=
+
+ # Always return a 0 (non-error) status code, even if lint errors are found.
+ # This is primarily useful in continuous integration scripts.
+ #exit-zero=
+
+ # A comma-separated list of package or module names from where C extensions may
+ # be loaded. Extensions are loading into the active Python interpreter and may
+ # run arbitrary code.
+ extension-pkg-allow-list=
+
+ # A comma-separated list of package or module names from where C extensions may
+ # be loaded. Extensions are loading into the active Python interpreter and may
+ # run arbitrary code. (This is an alternative name to extension-pkg-allow-list
+ # for backward compatibility.)
+ extension-pkg-whitelist=
+
+ # Return non-zero exit code if any of these messages/categories are detected,
+ # even if score is above --fail-under value. Syntax same as enable. Messages
+ # specified are enabled, while categories only check already-enabled messages.
+ fail-on=
+
+ # Specify a score threshold under which the program will exit with error.
+ fail-under=10
+
+ # Interpret the stdin as a python script, whose filename needs to be passed as
+ # the module_or_package argument.
+ #from-stdin=
+
+ # Files or directories to be skipped. They should be base names, not paths.
+ ignore=CVS
+
+ # Add files or directories matching the regular expressions patterns to the
+ # ignore-list. The regex matches against paths and can be in Posix or Windows
+ # format. Because '\' represents the directory delimiter on Windows systems, it
+ # can't be used as an escape character.
+ ignore-paths=
+
+ # Files or directories matching the regular expression patterns are skipped.
+ # The regex matches against base names, not paths. The default value ignores
+ # Emacs file locks
+ ignore-patterns=^\.#
+
+ # List of module names for which member attributes should not be checked
+ # (useful for modules/projects where namespaces are manipulated during runtime
+ # and thus existing member attributes cannot be deduced by static analysis). It
+ # supports qualified module names, as well as Unix pattern matching.
+ ignored-modules=
+
+ # Python code to execute, usually for sys.path manipulation such as
+ # pygtk.require().
+ #init-hook=
+
+ # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
+ # number of processors available to use, and will cap the count on Windows to
+ # avoid hangs.
+ jobs=1
+
+ # Control the amount of potential inferred values when inferring a single
+ # object. This can help the performance when dealing with large functions or
+ # complex, nested conditions.
+ limit-inference-results=100
+
+ # List of plugins (as comma separated values of python module names) to load,
+ # usually to register additional checkers.
+ load-plugins=
+
+ # Pickle collected data for later comparisons.
+ persistent=yes
+
+ # Minimum Python version to use for version dependent checks. Will default to
+ # the version used to run pylint.
+ py-version=3.9
+
+ # Discover python modules and packages in the file system subtree.
+ recursive=no
+
+ # When enabled, pylint would attempt to guess common misconfiguration and emit
+ # user-friendly hints instead of false-positive error messages.
+ suggestion-mode=yes
+
+ # Allow loading of arbitrary C extensions. Extensions are imported into the
+ # active Python interpreter and may run arbitrary code.
+ unsafe-load-any-extension=no
+
+ # In verbose mode, extra non-checker-related info will be displayed.
+ #verbose=
+
+
+ [BASIC]
+
+ # Naming style matching correct argument names.
+ argument-naming-style=snake_case
+
+ # Regular expression matching correct argument names. Overrides argument-
+ # naming-style. If left empty, argument names will be checked with the set
+ # naming style.
+ #argument-rgx=
+
+ # Naming style matching correct attribute names.
+ attr-naming-style=snake_case
+
+ # Regular expression matching correct attribute names. Overrides attr-naming-
+ # style. If left empty, attribute names will be checked with the set naming
+ # style.
+ #attr-rgx=
+
+ # Bad variable names which should always be refused, separated by a comma.
+ bad-names=foo,
+           bar,
+           baz,
+           toto,
+           tutu,
+           tata
+
+ # Bad variable names regexes, separated by a comma. If names match any regex,
+ # they will always be refused
+ bad-names-rgxs=
+
+ # Naming style matching correct class attribute names.
+ class-attribute-naming-style=any
+
+ # Regular expression matching correct class attribute names. Overrides class-
+ # attribute-naming-style. If left empty, class attribute names will be checked
+ # with the set naming style.
+ #class-attribute-rgx=
+
+ # Naming style matching correct class constant names.
+ class-const-naming-style=UPPER_CASE
+
+ # Regular expression matching correct class constant names. Overrides class-
+ # const-naming-style. If left empty, class constant names will be checked with
+ # the set naming style.
+ #class-const-rgx=
+
+ # Naming style matching correct class names.
+ class-naming-style=PascalCase
+
+ # Regular expression matching correct class names. Overrides class-naming-
+ # style. If left empty, class names will be checked with the set naming style.
+ #class-rgx=
+
+ # Naming style matching correct constant names.
+ const-naming-style=UPPER_CASE
+
+ # Regular expression matching correct constant names. Overrides const-naming-
+ # style. If left empty, constant names will be checked with the set naming
+ # style.
+ #const-rgx=
+
+ # Minimum line length for functions/classes that require docstrings, shorter
+ # ones are exempt.
+ docstring-min-length=-1
+
+ # Naming style matching correct function names.
+ function-naming-style=snake_case
+
+ # Regular expression matching correct function names. Overrides function-
+ # naming-style. If left empty, function names will be checked with the set
+ # naming style.
+ #function-rgx=
+
+ # Good variable names which should always be accepted, separated by a comma.
+ good-names=i,
+            j,
+            k,
+            ex,
+            Run,
+            _
+
+ # Good variable names regexes, separated by a comma. If names match any regex,
+ # they will always be accepted
+ good-names-rgxs=
+
+ # Include a hint for the correct naming format with invalid-name.
+ include-naming-hint=no
+
+ # Naming style matching correct inline iteration names.
+ inlinevar-naming-style=any
+
+ # Regular expression matching correct inline iteration names. Overrides
+ # inlinevar-naming-style. If left empty, inline iteration names will be checked
+ # with the set naming style.
+ #inlinevar-rgx=
+
+ # Naming style matching correct method names.
+ method-naming-style=snake_case
+
+ # Regular expression matching correct method names. Overrides method-naming-
+ # style. If left empty, method names will be checked with the set naming style.
+ #method-rgx=
+
+ # Naming style matching correct module names.
+ module-naming-style=snake_case
+
+ # Regular expression matching correct module names. Overrides module-naming-
+ # style. If left empty, module names will be checked with the set naming style.
+ #module-rgx=
+
+ # Colon-delimited sets of names that determine each other's naming style when
+ # the name regexes allow several styles.
+ name-group=
+
+ # Regular expression which should only match function or class names that do
+ # not require a docstring.
+ no-docstring-rgx=^_
+
+ # List of decorators that produce properties, such as abc.abstractproperty. Add
+ # to this list to register other decorators that produce valid properties.
+ # These decorators are taken in consideration only for invalid-name.
+ property-classes=abc.abstractproperty
+
+ # Regular expression matching correct type variable names. If left empty, type
+ # variable names will be checked with the set naming style.
+ #typevar-rgx=
+
+ # Naming style matching correct variable names.
+ variable-naming-style=snake_case
+
+ # Regular expression matching correct variable names. Overrides variable-
+ # naming-style. If left empty, variable names will be checked with the set
+ # naming style.
+ #variable-rgx=
+
+
+ [CLASSES]
+
+ # Warn about protected attribute access inside special methods
+ check-protected-access-in-special-methods=no
+
+ # List of method names used to declare (i.e. assign) instance attributes.
+ defining-attr-methods=__init__,
+                       __new__,
+                       setUp,
+                       __post_init__
+
+ # List of member names, which should be excluded from the protected access
+ # warning.
+ exclude-protected=_asdict,
+                   _fields,
+                   _replace,
+                   _source,
+                   _make
+
+ # List of valid names for the first argument in a class method.
+ valid-classmethod-first-arg=cls
+
+ # List of valid names for the first argument in a metaclass class method.
+ valid-metaclass-classmethod-first-arg=cls
+
+
+ [DESIGN]
+
+ # List of regular expressions of class ancestor names to ignore when counting
+ # public methods (see R0903)
+ exclude-too-few-public-methods=
+
+ # List of qualified class names to ignore when counting class parents (see
+ # R0901)
+ ignored-parents=
+
+ # Maximum number of arguments for function / method.
+ max-args=5
+
+ # Maximum number of attributes for a class (see R0902).
+ max-attributes=7
+
+ # Maximum number of boolean expressions in an if statement (see R0916).
+ max-bool-expr=5
+
+ # Maximum number of branch for function / method body.
+ max-branches=12
+
+ # Maximum number of locals for function / method body.
+ max-locals=15
+
+ # Maximum number of parents for a class (see R0901).
+ max-parents=7
+
+ # Maximum number of public methods for a class (see R0904).
+ max-public-methods=20
+
+ # Maximum number of return / yield for function / method body.
+ max-returns=6
+
+ # Maximum number of statements in function / method body.
+ max-statements=50
+
+ # Minimum number of public methods for a class (see R0903).
+ min-public-methods=2
+
+
+ [EXCEPTIONS]
+
+ # Exceptions that will emit a warning when caught.
+ overgeneral-exceptions=BaseException,
+                        Exception
+
+
+ [FORMAT]
+
+ # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+ expected-line-ending-format=
+
+ # Regexp for a line that is allowed to be longer than the limit.
+ ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+ # Number of spaces of indent required inside a hanging or continued line.
+ indent-after-paren=4
+
+ # String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+ # tab).
+ indent-string='    '
+
+ # Maximum number of characters on a single line.
+ max-line-length=100
+
+ # Maximum number of lines in a module.
+ max-module-lines=1000
+
+ # Allow the body of a class to be on the same line as the declaration if body
+ # contains single statement.
+ single-line-class-stmt=no
+
+ # Allow the body of an if to be on the same line as the test if there is no
+ # else.
+ single-line-if-stmt=no
+
+
+ [IMPORTS]
+
+ # List of modules that can be imported at any level, not just the top level
+ # one.
+ allow-any-import-level=
+
+ # Allow wildcard imports from modules that define __all__.
+ allow-wildcard-with-all=no
+
+ # Deprecated modules which should not be used, separated by a comma.
+ deprecated-modules=
+
+ # Output a graph (.gv or any supported image format) of external dependencies
+ # to the given file (report RP0402 must not be disabled).
+ ext-import-graph=
+
+ # Output a graph (.gv or any supported image format) of all (i.e. internal and
+ # external) dependencies to the given file (report RP0402 must not be
+ # disabled).
+ import-graph=
+
+ # Output a graph (.gv or any supported image format) of internal dependencies
+ # to the given file (report RP0402 must not be disabled).
+ int-import-graph=
+
+ # Force import order to recognize a module as part of the standard
+ # compatibility libraries.
+ known-standard-library=
+
+ # Force import order to recognize a module as part of a third party library.
+ known-third-party=enchant
+
+ # Couples of modules and preferred modules, separated by a comma.
+ preferred-modules=
+
+
+ [LOGGING]
+
+ # The type of string formatting that logging methods do. `old` means using %
+ # formatting, `new` is for `{}` formatting.
+ logging-format-style=old
+
+ # Logging modules to check that the string format arguments are in logging
+ # function parameter format.
+ logging-modules=logging
+
+
+ [MESSAGES CONTROL]
+
+ # Only show warnings with the listed confidence levels. Leave empty to show
+ # all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
+ # UNDEFINED.
+ confidence=HIGH,
+            CONTROL_FLOW,
+            INFERENCE,
+            INFERENCE_FAILURE,
+            UNDEFINED
+
+ # Disable the message, report, category or checker with the given id(s). You
+ # can either give multiple identifiers separated by comma (,) or put this
+ # option multiple times (only on the command line, not in the configuration
+ # file where it should appear only once). You can also use "--disable=all" to
+ # disable everything first and then re-enable specific checks. For example, if
+ # you want to run only the similarities checker, you can use "--disable=all
+ # --enable=similarities". If you want to run only the classes checker, but have
+ # no Warning level messages displayed, use "--disable=all --enable=classes
+ # --disable=W".
+ disable=raw-checker-failed,
+         bad-inline-option,
+         locally-disabled,
+         file-ignored,
+         suppressed-message,
+         useless-suppression,
+         deprecated-pragma,
+         use-symbolic-message-instead,
+         import-error,
+         invalid-name,
+         missing-module-docstring,
+         missing-function-docstring,
+         missing-class-docstring,
+         too-many-arguments,
+         too-many-locals,
+         too-many-branches,
+         too-many-statements,
+         global-variable-undefined,
+         import-outside-toplevel,
+         singleton-comparison,
+         too-many-lines,
+         duplicate-code,
+         bare-except,
+         cyclic-import,
+         global-statement,
+         no-member,
+         no-name-in-module,
+         unrecognized-option,
+         consider-using-dict-items,
+         consider-iterating-dictionary,
+         unexpected-keyword-arg
+
+ # Enable the message, report, category or checker with the given id(s). You can
+ # either give multiple identifier separated by comma (,) or put this option
+ # multiple time (only on the command line, not in the configuration file where
+ # it should appear only once). See also the "--disable" option for examples.
+ enable=c-extension-no-member
+
+
+ [METHOD_ARGS]
+
+ # List of qualified names (i.e., library.method) which require a timeout
+ # parameter e.g. 'requests.api.get,requests.api.post'
+ timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
+
+
+ [MISCELLANEOUS]
+
+ # List of note tags to take in consideration, separated by a comma.
+ notes=FIXME,
+       XXX,
+       TODO
+
+ # Regular expression of note tags to take in consideration.
+ notes-rgx=
+
+
+ [REFACTORING]
+
+ # Maximum number of nested blocks for function / method body
+ max-nested-blocks=5
+
+ # Complete name of functions that never returns. When checking for
+ # inconsistent-return-statements if a never returning function is called then
+ # it will be considered as an explicit return statement and no message will be
+ # printed.
+ never-returning-functions=sys.exit,argparse.parse_error
+
+
+ [REPORTS]
+
+ # Python expression which should return a score less than or equal to 10. You
+ # have access to the variables 'fatal', 'error', 'warning', 'refactor',
+ # 'convention', and 'info' which contain the number of messages in each
+ # category, as well as 'statement' which is the total number of statements
+ # analyzed. This score is used by the global evaluation report (RP0004).
+ evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
+
+ # Template used to display messages. This is a python new-style format string
+ # used to format the message information. See doc for all details.
+ msg-template=
+
+ # Set the output format. Available formats are text, parseable, colorized, json
+ # and msvs (visual studio). You can also give a reporter class, e.g.
+ # mypackage.mymodule.MyReporterClass.
+ #output-format=
+
+ # Tells whether to display a full report or only the messages.
+ reports=no
+
+ # Activate the evaluation score.
+ score=yes
+
+
+ [SIMILARITIES]
+
+ # Comments are removed from the similarity computation
+ ignore-comments=yes
+
+ # Docstrings are removed from the similarity computation
+ ignore-docstrings=yes
+
+ # Imports are removed from the similarity computation
+ ignore-imports=yes
+
+ # Signatures are removed from the similarity computation
+ ignore-signatures=yes
+
+ # Minimum lines number of a similarity.
+ min-similarity-lines=4
+
+
+ [SPELLING]
+
+ # Limits count of emitted suggestions for spelling mistakes.
+ max-spelling-suggestions=4
+
+ # Spelling dictionary name. Available dictionaries: none. To make it work,
+ # install the 'python-enchant' package.
+ spelling-dict=
+
+ # List of comma separated words that should be considered directives if they
+ # appear at the beginning of a comment and should not be checked.
+ spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
+
+ # List of comma separated words that should not be checked.
+ spelling-ignore-words=
+
+ # A path to a file that contains the private dictionary; one word per line.
+ spelling-private-dict-file=
+
+ # Tells whether to store unknown words to the private dictionary (see the
+ # --spelling-private-dict-file option) instead of raising a message.
+ spelling-store-unknown-words=no
+
+
+ [STRING]
+
+ # This flag controls whether inconsistent-quotes generates a warning when the
+ # character used as a quote delimiter is used inconsistently within a module.
+ check-quote-consistency=no
+
+ # This flag controls whether the implicit-str-concat should generate a warning
+ # on implicit string concatenation in sequences defined over several lines.
+ check-str-concat-over-line-jumps=no
+
+
+ [TYPECHECK]
+
+ # List of decorators that produce context managers, such as
+ # contextlib.contextmanager. Add to this list to register other decorators that
+ # produce valid context managers.
+ contextmanager-decorators=contextlib.contextmanager
+
+ # List of members which are set dynamically and missed by pylint inference
+ # system, and so shouldn't trigger E1101 when accessed. Python regular
+ # expressions are accepted.
+ generated-members=
+
+ # Tells whether to warn about missing members when the owner of the attribute
+ # is inferred to be None.
+ ignore-none=yes
+
+ # This flag controls whether pylint should warn about no-member and similar
+ # checks whenever an opaque object is returned when inferring. The inference
+ # can return multiple potential results while evaluating a Python object, but
+ # some branches might not be evaluated, which results in partial inference. In
+ # that case, it might be useful to still emit no-member and other checks for
+ # the rest of the inferred objects.
+ ignore-on-opaque-inference=yes
+
+ # List of symbolic message names to ignore for Mixin members.
+ ignored-checks-for-mixins=no-member,
+                           not-async-context-manager,
+                           not-context-manager,
+                           attribute-defined-outside-init
+
+ # List of class names for which member attributes should not be checked (useful
+ # for classes with dynamically set attributes). This supports the use of
+ # qualified names.
+ ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
+
+ # Show a hint with possible names when a member name was not found. The aspect
+ # of finding the hint is based on edit distance.
+ missing-member-hint=yes
+
+ # The minimum edit distance a name should have in order to be considered a
+ # similar match for a missing member name.
+ missing-member-hint-distance=1
+
+ # The total number of similar names that should be taken in consideration when
+ # showing a hint for a missing member.
+ missing-member-max-choices=1
+
+ # Regex pattern to define which classes are considered mixins.
+ mixin-class-rgx=.*[Mm]ixin
+
+ # List of decorators that change the signature of a decorated function.
+ signature-mutators=
+
+
+ [VARIABLES]
+
+ # List of additional names supposed to be defined in builtins. Remember that
+ # you should avoid defining new builtins when possible.
+ additional-builtins=
+
+ # Tells whether unused global variables should be treated as a violation.
+ allow-global-unused-variables=yes
+
+ # List of names allowed to shadow builtins
+ allowed-redefined-builtins=
+
+ # List of strings which can identify a callback function by name. A callback
+ # name must start or end with one of those strings.
+ callbacks=cb_,
+           _cb
+
+ # A regular expression matching the name of dummy variables (i.e. expected to
+ # not be used).
+ dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+ # Argument names that match this expression will be ignored.
+ ignored-argument-names=_.*|^ignored_|^unused_
+
+ # Tells whether we should check for unused import in __init__ files.
+ init-import=no
+
+ # List of qualified module names which can have objects that can redefine
+ # builtins.
+ redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
deepface/.vscode/settings.json ADDED
@@ -0,0 +1,18 @@
+ {
+     "python.linting.pylintEnabled": true,
+     "python.linting.enabled": true,
+     "python.linting.pylintUseMinimalCheckers": false,
+     "editor.formatOnSave": true,
+     "editor.renderWhitespace": "all",
+     "files.autoSave": "afterDelay",
+     "python.analysis.typeCheckingMode": "basic",
+     "python.formatting.provider": "black",
+     "python.formatting.blackArgs": ["--line-length=100"],
+     "editor.fontWeight": "normal",
+     "python.analysis.extraPaths": [
+         "./deepface"
+     ],
+     "black-formatter.args": [
+         "--line-length=100"
+     ]
+ }
deepface/CITATION.md ADDED
@@ -0,0 +1,71 @@
+ ## Cite DeepFace Papers
+
+ Please cite deepface in your publications if it helps your research. Here are its BibTeX entries:
+
+ ### Facial Recognition
+
+ If you use deepface in your research for facial recognition purposes, please cite these publications:
+
+ ```BibTeX
+ @article{serengil2024lightface,
+   title     = {A Benchmark of Facial Recognition Pipelines and Co-Usability Performances of Modules},
+   author    = {Serengil, Sefik Ilkin and Ozpinar, Alper},
+   journal   = {Bilisim Teknolojileri Dergisi},
+   volume    = {17},
+   number    = {2},
+   pages     = {95-107},
+   year      = {2024},
+   doi       = {10.17671/gazibtd.1399077},
+   url       = {https://dergipark.org.tr/en/pub/gazibtd/issue/84331/1399077},
+   publisher = {Gazi University}
+ }
+ ```
+
+ ```BibTeX
+ @inproceedings{serengil2020lightface,
+   title        = {LightFace: A Hybrid Deep Face Recognition Framework},
+   author       = {Serengil, Sefik Ilkin and Ozpinar, Alper},
+   booktitle    = {2020 Innovations in Intelligent Systems and Applications Conference (ASYU)},
+   pages        = {23-27},
+   year         = {2020},
+   doi          = {10.1109/ASYU50717.2020.9259802},
+   url          = {https://ieeexplore.ieee.org/document/9259802},
+   organization = {IEEE}
+ }
+ ```
+
+ ### Facial Attribute Analysis
+
+ If you use deepface in your research for facial attribute analysis purposes such as age, gender, emotion or ethnicity prediction, please cite this publication:
+
+ ```BibTeX
+ @inproceedings{serengil2021lightface,
+   title        = {HyperExtended LightFace: A Facial Attribute Analysis Framework},
+   author       = {Serengil, Sefik Ilkin and Ozpinar, Alper},
+   booktitle    = {2021 International Conference on Engineering and Emerging Technologies (ICEET)},
+   pages        = {1-4},
+   year         = {2021},
+   doi          = {10.1109/ICEET53442.2021.9659697},
+   url          = {https://ieeexplore.ieee.org/document/9659697/},
+   organization = {IEEE}
+ }
+ ```
+
+ ### Additional Papers
+
+ We have additionally released these papers within the DeepFace project for a multitude of purposes.
+
+ ```BibTeX
+ @misc{serengil2023db,
+   title         = {An evaluation of sql and nosql databases for facial recognition pipelines},
+   author        = {Serengil, Sefik Ilkin and Ozpinar, Alper},
+   year          = {2023},
+   archivePrefix = {Cambridge Open Engage},
+   doi           = {10.33774/coe-2023-18rcn},
+   url           = {https://www.cambridge.org/engage/coe/article-details/63f3e5541d2d184063d4f569}
+ }
+ ```
+
+ ### Repositories
+
+ Also, if you use deepface in your GitHub projects, please add `deepface` to the `requirements.txt`. Thereafter, your project will be listed in its [dependency graph](https://github.com/serengil/deepface/network/dependents).
deepface/Dockerfile ADDED
@@ -0,0 +1,58 @@
+ # base image
+ FROM python:3.8.12
+ LABEL org.opencontainers.image.source https://github.com/serengil/deepface
+
+ # -----------------------------------
+ # create required folder
+ RUN mkdir /app
+ RUN mkdir /app/deepface
+
+ # -----------------------------------
+ # switch to application directory
+ WORKDIR /app
+
+ # -----------------------------------
+ # update image os
+ RUN apt-get update
+ RUN apt-get install ffmpeg libsm6 libxext6 -y
+
+ # -----------------------------------
+ # Copy required files from repo into image
+ COPY ./deepface /app/deepface
+ # even though we will use local requirements, this file is still required to install deepface from source code
+ COPY ./requirements.txt /app/requirements.txt
+ COPY ./requirements_local /app/requirements_local.txt
+ COPY ./package_info.json /app/
+ COPY ./setup.py /app/
+ COPY ./README.md /app/
+
+ # -----------------------------------
+ # if you plan to use a GPU, you should install the 'tensorflow-gpu' package
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org tensorflow-gpu
+
+ # if you plan to use face anti-spoofing, then activate this line
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org torch==2.1.2
+ # -----------------------------------
+ # install deepface from pypi release (might be out-of-date)
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org deepface
+ # -----------------------------------
+ # install dependencies - deepface with these dependency versions is working
+ RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org -r /app/requirements_local.txt
+ # install deepface from source code (always up-to-date)
+ RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org -e .
+
+ # -----------------------------------
+ # some packages are optional in deepface. activate if your task depends on one.
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org cmake==3.24.1.1
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org dlib==19.20.0
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org lightgbm==2.3.1
+
+ # -----------------------------------
+ # environment variables
+ ENV PYTHONUNBUFFERED=1
+
+ # -----------------------------------
+ # run the app (re-configure port if necessary)
+ WORKDIR /app/deepface/api/src
+ EXPOSE 5000
+ CMD ["gunicorn", "--workers=1", "--timeout=3600", "--bind=0.0.0.0:5000", "app:create_app()"]
deepface/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2019 Sefik Ilkin Serengil
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
deepface/Makefile ADDED
@@ -0,0 +1,8 @@
+ test:
+ 	cd tests && python -m pytest . -s --disable-warnings
+
+ lint:
+ 	python -m pylint deepface/ --fail-under=10
+
+ coverage:
+ 	pip install pytest-cov && cd tests && python -m pytest --cov=deepface
deepface/README.md ADDED
@@ -0,0 +1,461 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ # deepface
+
+ <div align="center">
+
+ [![Downloads](https://static.pepy.tech/personalized-badge/deepface?period=total&units=international_system&left_color=grey&right_color=blue&left_text=downloads)](https://pepy.tech/project/deepface)
+ [![Stars](https://img.shields.io/github/stars/serengil/deepface?color=yellow&style=flat&label=%E2%AD%90%20stars)](https://github.com/serengil/deepface/stargazers)
+ [![License](http://img.shields.io/:license-MIT-green.svg?style=flat)](https://github.com/serengil/deepface/blob/master/LICENSE)
+ [![Tests](https://github.com/serengil/deepface/actions/workflows/tests.yml/badge.svg)](https://github.com/serengil/deepface/actions/workflows/tests.yml)
+ [![DOI](http://img.shields.io/:DOI-10.17671/gazibtd.1399077-blue.svg?style=flat)](https://doi.org/10.17671/gazibtd.1399077)
+
+ [![Blog](https://img.shields.io/:blog-sefiks.com-blue.svg?style=flat&logo=wordpress)](https://sefiks.com)
+ [![YouTube](https://img.shields.io/:[email protected]?style=flat&logo=youtube)](https://www.youtube.com/@sefiks?sub_confirmation=1)
+ [![Twitter](https://img.shields.io/:[email protected]?style=flat&logo=x)](https://twitter.com/intent/user?screen_name=serengil)
+
+ [![Support me on Patreon](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Fshieldsio-patreon.vercel.app%2Fapi%3Fusername%3Dserengil%26type%3Dpatrons&style=flat)](https://www.patreon.com/serengil?repo=deepface)
+ [![GitHub Sponsors](https://img.shields.io/github/sponsors/serengil?logo=GitHub&color=lightgray)](https://github.com/sponsors/serengil)
+ [![Buy Me a Coffee](https://img.shields.io/badge/-buy_me_a%C2%A0coffee-gray?logo=buy-me-a-coffee)](https://buymeacoffee.com/serengil)
+
+ <!-- [![DOI](http://img.shields.io/:DOI-10.1109/ICEET53442.2021.9659697-blue.svg?style=flat)](https://doi.org/10.1109/ICEET53442.2021.9659697) -->
+ <!-- [![DOI](http://img.shields.io/:DOI-10.1109/ASYU50717.2020.9259802-blue.svg?style=flat)](https://doi.org/10.1109/ASYU50717.2020.9259802) -->
+
+ </div>
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/deepface-icon-labeled.png" width="200" height="240"></p>
+
+ DeepFace is a lightweight [face recognition](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/) and facial attribute analysis ([age](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [gender](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [emotion](https://sefiks.com/2018/01/01/facial-expression-recognition-with-keras/) and [race](https://sefiks.com/2019/11/11/race-and-ethnicity-prediction-in-keras/)) framework for Python. It is a hybrid face recognition framework wrapping **state-of-the-art** models: [`VGG-Face`](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/), [`FaceNet`](https://sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/), [`OpenFace`](https://sefiks.com/2019/07/21/face-recognition-with-openface-in-keras/), [`DeepFace`](https://sefiks.com/2020/02/17/face-recognition-with-facebook-deepface-in-keras/), [`DeepID`](https://sefiks.com/2020/06/16/face-recognition-with-deepid-in-keras/), [`ArcFace`](https://sefiks.com/2020/12/14/deep-face-recognition-with-arcface-in-keras-and-python/), [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/), `SFace` and `GhostFaceNet`.
+
+ [`Experiments`](https://github.com/serengil/deepface/tree/master/benchmarks) show that **human beings have 97.53% accuracy** on facial recognition tasks, whereas those models have already reached and surpassed that accuracy level.
+
+ ## Installation [![PyPI](https://img.shields.io/pypi/v/deepface.svg)](https://pypi.org/project/deepface/)
+
+ The easiest way to install deepface is to download it from [`PyPI`](https://pypi.org/project/deepface/). This installs the library itself along with its prerequisites.
+
+ ```shell
+ $ pip install deepface
+ ```
+
+ Alternatively, you can install deepface from its source code, which may include new features not yet published in the PyPI release.
+
+ ```shell
+ $ git clone https://github.com/serengil/deepface.git
+ $ cd deepface
+ $ pip install -e .
+ ```
+
+ Once the library is installed, you can import it and use its functionality.
+
+ ```python
+ from deepface import DeepFace
+ ```
+
+ **A Modern Facial Recognition Pipeline** - [`Demo`](https://youtu.be/WnUVYQP4h44)
+
+ A modern [**face recognition pipeline**](https://sefiks.com/2020/05/01/a-gentle-introduction-to-face-recognition-in-deep-learning/) consists of 5 common stages: [detect](https://sefiks.com/2020/08/25/deep-face-detection-with-opencv-in-python/), [align](https://sefiks.com/2020/02/23/face-alignment-for-face-recognition-in-python-within-opencv/), [normalize](https://sefiks.com/2020/11/20/facial-landmarks-for-face-recognition-with-dlib/), [represent](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/) and [verify](https://sefiks.com/2020/05/22/fine-tuning-the-threshold-in-face-recognition/). DeepFace handles all of these common stages in the background, so you don't need in-depth knowledge about the processes behind them. You can call its verification, find or analysis functions with a single line of code.
+
+ **Face Verification** - [`Demo`](https://youtu.be/KRCvkNCOphE)
+
+ This function verifies whether a face pair belongs to the same person or to different persons. It expects exact image paths as inputs; passing numpy arrays or base64 encoded images is also welcome. It returns a dictionary, and you should check its `verified` key.
+
+ ```python
+ result = DeepFace.verify(
+     img1_path = "img1.jpg",
+     img2_path = "img2.jpg",
+ )
+ ```
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/stock-1.jpg" width="95%" height="95%"></p>
+
+ **Face recognition** - [`Demo`](https://youtu.be/Hrjp-EStM_s)
+
+ [Face recognition](https://sefiks.com/2020/05/25/large-scale-face-recognition-for-deep-learning/) requires applying face verification many times. Herein, deepface offers an out-of-the-box `find` function to handle this action. It looks for the identity of the input image in the database path and returns a list of pandas DataFrames as output, as shown in the walkthrough below. Meanwhile, facial embeddings of the database images are stored in a pickle file so that subsequent searches run faster. The result list contains one DataFrame per face appearing in the source image. Target images in the database can contain many faces as well.
+
+ ```python
+ dfs = DeepFace.find(
+     img_path = "img1.jpg",
+     db_path = "C:/workspace/my_db",
+ )
+ ```
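+
+ Since `find` returns one DataFrame per detected face, you typically iterate over the list and inspect the rows. A minimal sketch of that walkthrough; the `identity` and `distance` column names follow the library's conventions but should be treated as assumptions if your version differs:
+
+ ```python
+ for df in dfs:
+     # each df holds the candidate matches for one face in the source image
+     for _, row in df.iterrows():
+         # identity: matched image path under db_path; distance: lower means more similar
+         print(row["identity"], row["distance"])
+ ```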
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/stock-6-v2.jpg" width="95%" height="95%"></p>
+
+ **Embeddings** - [`Demo`](https://youtu.be/OYialFo7Qo4)
+
+ Face recognition models basically represent facial images as multi-dimensional vectors. Sometimes, you need those embedding vectors directly. DeepFace comes with a dedicated `represent` function that returns a list of embeddings, with one entry per face appearing in the image.
+
+ ```python
+ embedding_objs = DeepFace.represent(
+     img_path = "img.jpg"
+ )
+ ```
+
+ Each item of the returned list contains an embedding array. The size of the embedding depends on the model name: for instance, VGG-Face is the default model and represents facial images as 4096-dimensional vectors.
+
+ ```python
+ for embedding_obj in embedding_objs:
+     embedding = embedding_obj["embedding"]
+     assert isinstance(embedding, list)
+     # VGG-Face is the default model, so each embedding has 4096 dimensions
+     assert len(embedding) == 4096
+ ```
+
+ Here, the embedding is also [plotted](https://sefiks.com/2020/05/01/a-gentle-introduction-to-face-recognition-in-deep-learning/) with 4096 slots horizontally. Each slot corresponds to a dimension value in the embedding vector, and the dimension value is shown in the colorbar on the right. Similar to 2D barcodes, the vertical dimension stores no information in the illustration.
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/embedding.jpg" width="95%" height="95%"></p>
+
+ **Face recognition models** - [`Demo`](https://youtu.be/eKOZawGR3y0)
+
+ DeepFace is a **hybrid** face recognition package. It currently wraps many **state-of-the-art** face recognition models: [`VGG-Face`](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/), [`FaceNet`](https://sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/), [`OpenFace`](https://sefiks.com/2019/07/21/face-recognition-with-openface-in-keras/), [`DeepFace`](https://sefiks.com/2020/02/17/face-recognition-with-facebook-deepface-in-keras/), [`DeepID`](https://sefiks.com/2020/06/16/face-recognition-with-deepid-in-keras/), [`ArcFace`](https://sefiks.com/2020/12/14/deep-face-recognition-with-arcface-in-keras-and-python/), [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/), `SFace` and `GhostFaceNet`. The default configuration uses the VGG-Face model.
+
+ ```python
+ models = [
+     "VGG-Face",
+     "Facenet",
+     "Facenet512",
+     "OpenFace",
+     "DeepFace",
+     "DeepID",
+     "ArcFace",
+     "Dlib",
+     "SFace",
+     "GhostFaceNet",
+ ]
+
+ # face verification
+ result = DeepFace.verify(
+     img1_path = "img1.jpg",
+     img2_path = "img2.jpg",
+     model_name = models[0],
+ )
+
+ # face recognition
+ dfs = DeepFace.find(
+     img_path = "img1.jpg",
+     db_path = "C:/workspace/my_db",
+     model_name = models[1],
+ )
+
+ # embeddings
+ embedding_objs = DeepFace.represent(
+     img_path = "img.jpg",
+     model_name = models[2],
+ )
+ ```
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/model-portfolio-20240316.jpg" width="95%" height="95%"></p>
+
+ FaceNet, VGG-Face, ArcFace and Dlib are the top performers according to experiments - see [`BENCHMARKS`](https://github.com/serengil/deepface/tree/master/benchmarks) for more details. You can find the scores measured within DeepFace and the scores reported in the models' original studies in the following table.
+
+ | Model          | Measured Score | Declared Score |
+ | -------------- | -------------- | -------------- |
+ | Facenet512     | 98.4%          | 99.6%          |
+ | Human-beings   | 97.5%          | 97.5%          |
+ | Facenet        | 97.4%          | 99.2%          |
+ | Dlib           | 96.8%          | 99.3%          |
+ | VGG-Face       | 96.7%          | 98.9%          |
+ | ArcFace        | 96.7%          | 99.5%          |
+ | GhostFaceNet   | 93.3%          | 99.7%          |
+ | SFace          | 93.0%          | 99.5%          |
+ | OpenFace       | 78.7%          | 92.9%          |
+ | DeepFace       | 69.0%          | 97.3%          |
+ | DeepID         | 66.5%          | 97.4%          |
+
+ Conducting experiments with those models within DeepFace may reveal disparities compared to the original studies, owing to the adoption of distinct detection or normalization techniques. Furthermore, some models have been released solely with their backbones, lacking pre-trained weights. Thus, we are utilizing their re-implementations instead of the original pre-trained weights.
+
+ **Similarity** - [`Demo`](https://youtu.be/1EPoS69fHOc)
+
+ Face recognition models are regular [convolutional neural networks](https://sefiks.com/2018/03/23/convolutional-autoencoder-clustering-images-with-neural-networks/) that are responsible for representing faces as vectors. We expect a face pair of the same person to be [more similar](https://sefiks.com/2020/05/22/fine-tuning-the-threshold-in-face-recognition/) than a face pair of different persons.
+
+ Similarity can be calculated with different metrics such as [Cosine Similarity](https://sefiks.com/2018/08/13/cosine-similarity-in-machine-learning/), Euclidean Distance or L2-normalized Euclidean distance. The default configuration uses cosine similarity. According to [experiments](https://github.com/serengil/deepface/tree/master/benchmarks), no distance metric clearly outperforms the others.
+
+ ```python
+ metrics = ["cosine", "euclidean", "euclidean_l2"]
+
+ # face verification
+ result = DeepFace.verify(
+     img1_path = "img1.jpg",
+     img2_path = "img2.jpg",
+     distance_metric = metrics[1],
+ )
+
+ # face recognition
+ dfs = DeepFace.find(
+     img_path = "img1.jpg",
+     db_path = "C:/workspace/my_db",
+     distance_metric = metrics[2],
+ )
+ ```
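+
+ For intuition, the three metrics can be reproduced from two embedding vectors with plain numpy. This is a minimal sketch of the standard definitions, not necessarily DeepFace's internal implementation:
+
+ ```python
+ import numpy as np
+
+ def cosine_distance(a, b):
+     # 1 - cosine similarity; 0 means identical direction
+     a, b = np.asarray(a), np.asarray(b)
+     return 1 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
+
+ def euclidean_distance(a, b):
+     return float(np.linalg.norm(np.asarray(a) - np.asarray(b)))
+
+ def euclidean_l2_distance(a, b):
+     # euclidean distance after l2-normalizing both vectors
+     a = np.asarray(a) / np.linalg.norm(a)
+     b = np.asarray(b) / np.linalg.norm(b)
+     return float(np.linalg.norm(a - b))
+ ```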
+
+ **Facial Attribute Analysis** - [`Demo`](https://youtu.be/GT2UeN85BdA)
+
+ DeepFace also comes with a strong facial attribute analysis module covering [`age`](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [`gender`](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [`facial expression`](https://sefiks.com/2018/01/01/facial-expression-recognition-with-keras/) (including angry, fear, neutral, sad, disgust, happy and surprise) and [`race`](https://sefiks.com/2019/11/11/race-and-ethnicity-prediction-in-keras/) (including asian, white, middle eastern, indian, latino and black) predictions. The result list contains one entry per face appearing in the source image.
+
+ ```python
+ objs = DeepFace.analyze(
+     img_path = "img4.jpg",
+     actions = ['age', 'gender', 'race', 'emotion'],
+ )
+ ```
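+
+ Each returned object is a dictionary of predictions for one detected face. A minimal sketch of reading the dominant attributes; the key names below follow the library's conventions but should be treated as assumptions if your version differs:
+
+ ```python
+ for obj in objs:
+     # age is a numeric estimate; the dominant_* keys hold the top predicted class
+     print(
+         obj["age"],
+         obj["dominant_gender"],
+         obj["dominant_race"],
+         obj["dominant_emotion"],
+     )
+ ```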
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/stock-2.jpg" width="95%" height="95%"></p>
+
+ The age model achieves a mean absolute error of ±4.65, while the gender model reaches 97.44% accuracy, 96.29% precision and 95.05% recall, as mentioned in its [tutorial](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/).
+
+ **Face Detection and Alignment** - [`Demo`](https://youtu.be/GZ2p2hj2H5k)
+
+ Face detection and alignment are important early stages of a modern face recognition pipeline. [Experiments](https://github.com/serengil/deepface/tree/master/benchmarks) show that detection increases face recognition accuracy by up to 42%, while alignment increases it by up to 6%. [`OpenCV`](https://sefiks.com/2020/02/23/face-alignment-for-face-recognition-in-python-within-opencv/), [`Ssd`](https://sefiks.com/2020/08/25/deep-face-detection-with-opencv-in-python/), [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/), [`MtCnn`](https://sefiks.com/2020/09/09/deep-face-detection-with-mtcnn-in-python/), `Faster MtCnn`, [`RetinaFace`](https://sefiks.com/2021/04/27/deep-face-detection-with-retinaface-in-python/), [`MediaPipe`](https://sefiks.com/2022/01/14/deep-face-detection-with-mediapipe/), `Yolo`, `YuNet` and `CenterFace` detectors are wrapped in deepface.
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/detector-portfolio-v6.jpg" width="95%" height="95%"></p>
+
+ All deepface functions accept optional `detector_backend` and `align` input arguments, which let you switch among those detectors and alignment modes. OpenCV is the default detector, and alignment is enabled by default.
+
+ ```python
+ backends = [
+     'opencv',
+     'ssd',
+     'dlib',
+     'mtcnn',
+     'fastmtcnn',
+     'retinaface',
+     'mediapipe',
+     'yolov8',
+     'yunet',
+     'centerface',
+ ]
+
+ alignment_modes = [True, False]
+
+ # face verification
+ obj = DeepFace.verify(
+     img1_path = "img1.jpg",
+     img2_path = "img2.jpg",
+     detector_backend = backends[0],
+     align = alignment_modes[0],
+ )
+
+ # face recognition
+ dfs = DeepFace.find(
+     img_path = "img.jpg",
+     db_path = "my_db",
+     detector_backend = backends[1],
+     align = alignment_modes[0],
+ )
+
+ # embeddings
+ embedding_objs = DeepFace.represent(
+     img_path = "img.jpg",
+     detector_backend = backends[2],
+     align = alignment_modes[0],
+ )
+
+ # facial analysis
+ demographies = DeepFace.analyze(
+     img_path = "img4.jpg",
+     detector_backend = backends[3],
+     align = alignment_modes[0],
+ )
+
+ # face detection and alignment
+ face_objs = DeepFace.extract_faces(
+     img_path = "img.jpg",
+     detector_backend = backends[4],
+     align = alignment_modes[0],
+ )
+ ```
+
+ Face recognition models are CNNs and expect inputs of a standard size, so resizing is required before representation. To avoid deformation, deepface adds black padding pixels according to the target size argument after detection and alignment.
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/detector-outputs-20240414.jpg" width="90%" height="90%"></p>
+
+ [RetinaFace](https://sefiks.com/2021/04/27/deep-face-detection-with-retinaface-in-python/) and [MtCnn](https://sefiks.com/2020/09/09/deep-face-detection-with-mtcnn-in-python/) perform best in the detection and alignment stages, but they are much slower. If the speed of your pipeline matters more, you should use opencv or ssd; if accuracy matters more, you should use retinaface or mtcnn.
+
+ The performance of RetinaFace is very satisfactory even in crowds, as seen in the following illustration. It also delivers excellent facial landmark detection: the highlighted red points mark some facial landmarks such as the eyes, nose and mouth. That is why the alignment score of RetinaFace is high as well.
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/retinaface-results.jpeg" width="90%" height="90%">
+ <br><em>The Yellow Angels - Fenerbahce Women's Volleyball Team</em>
+ </p>
+
+ You can find out more about RetinaFace on this [repo](https://github.com/serengil/retinaface).
+
+ **Real Time Analysis** - [`Demo`](https://youtu.be/-c9sSJcx6wI)
+
+ You can run deepface on real-time video as well. The `stream` function accesses your webcam and applies both face recognition and facial attribute analysis. It starts analyzing a frame once it can focus on a face for 5 consecutive frames, and then it shows the results for 5 seconds.
+
+ ```python
+ DeepFace.stream(db_path = "C:/User/Sefik/Desktop/database")
+ ```
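+
+ The frame and time thresholds described above are configurable. A minimal sketch, assuming this version of `stream` exposes `frame_threshold` and `time_threshold` arguments; check the function signature if your version differs:
+
+ ```python
+ DeepFace.stream(
+     db_path = "C:/User/Sefik/Desktop/database",
+     frame_threshold = 5,  # frames a face must stay in focus before analysis starts
+     time_threshold = 5,   # seconds the results stay on screen
+ )
+ ```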
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/stock-3.jpg" width="90%" height="90%"></p>
+
+ Even though face recognition is based on one-shot learning, you can also use multiple face pictures of a person. You should arrange your directory structure as illustrated below.
+
+ ```bash
+ user
+ ├── database
+ │   ├── Alice
+ │   │   ├── Alice1.jpg
+ │   │   ├── Alice2.jpg
+ │   ├── Bob
+ │   │   ├── Bob.jpg
+ ```
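+
+ The same nested layout can also be consumed by the `find` function, since it scans the database path recursively. A minimal sketch; the query image name here is hypothetical:
+
+ ```python
+ # assumes the directory tree shown above exists under the current path
+ dfs = DeepFace.find(
+     img_path = "new_photo.jpg",  # hypothetical query image
+     db_path = "user/database",
+ )
+ ```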
+
+ **React UI** - [`Demo part-i`](https://youtu.be/IXoah6rhxac), [`Demo part-ii`](https://youtu.be/_waBA-cH2D4)
+
+ If you intend to perform face verification tasks directly from your browser, [deepface-react-ui](https://github.com/serengil/deepface-react-ui) is a separate repository built with ReactJS on top of the deepface API.
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/deepface-and-react.jpg" width="90%" height="90%"></p>
+
+ **Face Anti Spoofing** - [`Demo`](https://youtu.be/UiK1aIjOBlQ)
+
+ DeepFace also includes an anti-spoofing analysis module to determine whether a given image is real or fake. To activate this feature, set the `anti_spoofing` argument to True in any DeepFace task.
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/face-anti-spoofing.jpg" width="40%" height="40%"></p>
+
+ ```python
+ # anti spoofing test in face detection
+ face_objs = DeepFace.extract_faces(
+     img_path = "dataset/img1.jpg",
+     anti_spoofing = True
+ )
+ assert all(face_obj["is_real"] is True for face_obj in face_objs)
+
+ # anti spoofing test in real time analysis
+ DeepFace.stream(
+     db_path = "C:/User/Sefik/Desktop/database",
+     anti_spoofing = True
+ )
+ ```
+
+ **API** - [`Demo`](https://youtu.be/HeKCQ6U9XmI)
+
+ DeepFace serves an API as well - see the [`api folder`](https://github.com/serengil/deepface/tree/master/deepface/api/src) for more details. You can clone the deepface source code and run the API with the following command. It uses a gunicorn server to bring a REST service up, so you can call deepface from an external system such as a mobile app or the web.
+
+ ```shell
+ cd scripts
+ ./service.sh
+ ```
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/deepface-api.jpg" width="90%" height="90%"></p>
+
+ Face recognition, facial attribute analysis and vector representation functions are covered in the API. You are expected to call these functions as HTTP POST methods. The default service endpoints are `http://localhost:5005/verify` for face recognition, `http://localhost:5005/analyze` for facial attribute analysis, and `http://localhost:5005/represent` for vector representation. You can pass input images as exact image paths in your environment, base64 encoded strings or URLs of images on the web. [Here](https://github.com/serengil/deepface/tree/master/deepface/api/postman), you can find a postman project showing how these methods should be called.
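+
+ For example, a verification request can be sent with Python's requests package. This is a minimal sketch; the JSON field names `img1_path` and `img2_path` follow the bundled postman project and should be treated as assumptions for your version:
+
+ ```python
+ import requests
+
+ resp = requests.post(
+     "http://localhost:5005/verify",
+     json = {
+         "img1_path": "dataset/img1.jpg",  # exact path, base64 string or image URL
+         "img2_path": "dataset/img2.jpg",
+     },
+ )
+ print(resp.json())  # contains the 'verified' key among others
+ ```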
+
+ **Dockerized Service** - [`Demo`](https://youtu.be/9Tk9lRQareA)
+
+ [![Docker Pulls](https://img.shields.io/docker/pulls/serengil/deepface?logo=docker)](https://hub.docker.com/r/serengil/deepface)
+
+ The following commands serve deepface on `localhost:5005` via docker, after which you can consume deepface services such as verify, analyze and represent. If you want to build the image yourself instead of using the pre-built image from Docker Hub, a [Dockerfile](https://github.com/serengil/deepface/blob/master/Dockerfile) is available in the root folder of the project.
+
+ ```shell
+ # docker build -t serengil/deepface . # build docker image from Dockerfile
+ docker pull serengil/deepface # use pre-built docker image from docker hub
+ docker run -p 5005:5000 serengil/deepface
+ ```
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/deepface-dockerized-v2.jpg" width="50%" height="50%"></p>
+
+ **Command Line Interface** - [`Demo`](https://youtu.be/PKKTAr3ts2s)
+
+ DeepFace comes with a command line interface as well. You can access its functions from the command line as shown below. The `deepface` command expects the function name as its first argument and the function's arguments thereafter.
+
+ ```shell
+ # face verification
+ $ deepface verify -img1_path tests/dataset/img1.jpg -img2_path tests/dataset/img2.jpg
+
+ # facial analysis
+ $ deepface analyze -img_path tests/dataset/img1.jpg
+ ```
+
+ You can also run these commands if you are running deepface with docker. Please follow the instructions in the [shell script](https://github.com/serengil/deepface/blob/master/scripts/dockerize.sh#L17).
+
+ **Large Scale Facial Recognition** - [`Playlist`](https://www.youtube.com/playlist?list=PLsS_1RYmYQQGSJu_Z3OVhXhGmZ86_zuIm)
+
+ If your task requires facial recognition on large datasets, you should combine DeepFace with a vector index or vector database. This setup performs [approximate nearest neighbor](https://youtu.be/c10w0Ptn_CU) searches instead of exact ones, allowing you to identify a face in a database containing billions of entries within milliseconds. Common vector index solutions include [Annoy](https://youtu.be/Jpxm914o2xk), [Faiss](https://youtu.be/6AmEvDTKT-k), [Voyager](https://youtu.be/2ZYTV9HlFdU), [NMSLIB](https://youtu.be/EVBhO8rbKbg) and [ElasticSearch](https://youtu.be/i4GvuOmzKzo). For vector databases, popular options are [Postgres with its pgvector extension](https://youtu.be/Xfv4hCWvkp0) and [RediSearch](https://youtu.be/yrXlS0d6t4w).
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/deepface-big-data.jpg" width="90%" height="90%"></p>
+
+ Conversely, if your task involves facial recognition on small to moderate-sized databases, you can use relational databases such as [Postgres](https://youtu.be/f41sLxn1c0k) or [SQLite](https://youtu.be/_1ShBeWToPg), or NoSQL databases like [Mongo](https://youtu.be/dmprgum9Xu8), [Redis](https://youtu.be/X7DSpUMVTsw) or [Cassandra](https://youtu.be/J_yXpc3Y8Ec), to perform exact nearest neighbor search, as sketched below.
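+
+ For a small database, exact nearest neighbor search is just a brute-force scan over stored embeddings. A minimal numpy sketch, assuming you have already built a mapping from identities to embeddings via `DeepFace.represent`; the storage layer (SQLite, Mongo, etc.) would simply persist this mapping:
+
+ ```python
+ import numpy as np
+
+ def find_nearest(query_embedding, db):
+     """db: dict mapping identity -> embedding list, e.g. filled via DeepFace.represent"""
+     query = np.asarray(query_embedding)
+     best_identity, best_distance = None, float("inf")
+     for identity, embedding in db.items():
+         candidate = np.asarray(embedding)
+         # cosine distance, deepface's default metric
+         distance = 1 - np.dot(query, candidate) / (
+             np.linalg.norm(query) * np.linalg.norm(candidate)
+         )
+         if distance < best_distance:
+             best_identity, best_distance = identity, distance
+     return best_identity, best_distance
+ ```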
+
+ ## Contribution
+
+ Pull requests are more than welcome! If you are planning to contribute a large patch, please create an issue first to get any upfront questions or design decisions out of the way.
+
+ Before creating a PR, you should run the unit tests and linting locally with the `make test && make lint` command. Once a PR is sent, the GitHub test workflow runs automatically, and the unit test and linting jobs will be available in [GitHub actions](https://github.com/serengil/deepface/actions) before approval.
+
+ ## Support
+
+ There are many ways to support a project - starring⭐️ the GitHub repo is just one 🙏
+
+ If you like this work, you can support it financially on [Patreon](https://www.patreon.com/serengil?repo=deepface), [GitHub Sponsors](https://github.com/sponsors/serengil) or [Buy Me a Coffee](https://buymeacoffee.com/serengil).
+
+ <a href="https://www.patreon.com/serengil?repo=deepface">
+ <img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/patreon.png" width="30%" height="30%">
+ </a>
+
+ <a href="https://buymeacoffee.com/serengil">
+ <img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/bmc-button.png" width="25%" height="25%">
+ </a>
+
+ Also, your company's logo will be shown on the README on GitHub if you become a sponsor in the gold, silver or bronze tiers.
+
+ ## Citation
+
+ Please cite deepface in your publications if it helps your research - see [`CITATIONS`](https://github.com/serengil/deepface/blob/master/CITATION.md) for more details. Here are its BibTeX entries:
+
+ If you use deepface in your research for facial recognition or face detection purposes, please cite these publications:
+
+ ```BibTeX
+ @article{serengil2024lightface,
+   title     = {A Benchmark of Facial Recognition Pipelines and Co-Usability Performances of Modules},
+   author    = {Serengil, Sefik Ilkin and Ozpinar, Alper},
+   journal   = {Journal of Information Technologies},
+   volume    = {17},
+   number    = {2},
+   pages     = {95-107},
+   year      = {2024},
+   doi       = {10.17671/gazibtd.1399077},
+   url       = {https://dergipark.org.tr/en/pub/gazibtd/issue/84331/1399077},
+   publisher = {Gazi University}
+ }
+ ```
+
+ ```BibTeX
+ @inproceedings{serengil2020lightface,
+   title        = {LightFace: A Hybrid Deep Face Recognition Framework},
+   author       = {Serengil, Sefik Ilkin and Ozpinar, Alper},
+   booktitle    = {2020 Innovations in Intelligent Systems and Applications Conference (ASYU)},
+   pages        = {23-27},
+   year         = {2020},
+   doi          = {10.1109/ASYU50717.2020.9259802},
+   url          = {https://ieeexplore.ieee.org/document/9259802},
+   organization = {IEEE}
+ }
+ ```
+
+ On the other hand, if you use deepface in your research for facial attribute analysis purposes such as age, gender, emotion or ethnicity prediction tasks, please cite this publication.
+
+ ```BibTeX
+ @inproceedings{serengil2021lightface,
+   title        = {HyperExtended LightFace: A Facial Attribute Analysis Framework},
+   author       = {Serengil, Sefik Ilkin and Ozpinar, Alper},
+   booktitle    = {2021 International Conference on Engineering and Emerging Technologies (ICEET)},
+   pages        = {1-4},
+   year         = {2021},
+   doi          = {10.1109/ICEET53442.2021.9659697},
+   url          = {https://ieeexplore.ieee.org/document/9659697},
+   organization = {IEEE}
+ }
+ ```
+
+ Also, if you use deepface in your GitHub projects, please add `deepface` to your `requirements.txt`.
+
+ ## License
+
+ DeepFace is licensed under the MIT License - see [`LICENSE`](https://github.com/serengil/deepface/blob/master/LICENSE) for more details.
+
+ DeepFace wraps some external face recognition models: [VGG-Face](http://www.robots.ox.ac.uk/~vgg/software/vgg_face/), [Facenet](https://github.com/davidsandberg/facenet/blob/master/LICENSE.md) (both 128d and 512d), [OpenFace](https://github.com/iwantooxxoox/Keras-OpenFace/blob/master/LICENSE), [DeepFace](https://github.com/swghosh/DeepFace), [DeepID](https://github.com/Ruoyiran/DeepID/blob/master/LICENSE.md), [ArcFace](https://github.com/leondgarse/Keras_insightface/blob/master/LICENSE), [Dlib](https://github.com/davisking/dlib/blob/master/dlib/LICENSE.txt), [SFace](https://github.com/opencv/opencv_zoo/blob/master/models/face_recognition_sface/LICENSE) and [GhostFaceNet](https://github.com/HamadYA/GhostFaceNets/blob/main/LICENSE). Besides, the age, gender and race / ethnicity models were trained on the backbone of VGG-Face with transfer learning. Similarly, DeepFace wraps many face detectors: [OpenCv](https://github.com/opencv/opencv/blob/4.x/LICENSE), [Ssd](https://github.com/opencv/opencv/blob/master/LICENSE), [Dlib](https://github.com/davisking/dlib/blob/master/LICENSE.txt), [MtCnn](https://github.com/ipazc/mtcnn/blob/master/LICENSE), [Fast MtCnn](https://github.com/timesler/facenet-pytorch/blob/master/LICENSE.md), [RetinaFace](https://github.com/serengil/retinaface/blob/master/LICENSE), [MediaPipe](https://github.com/google/mediapipe/blob/master/LICENSE), [YuNet](https://github.com/ShiqiYu/libfacedetection/blob/master/LICENSE), [Yolo](https://github.com/derronqi/yolov8-face/blob/main/LICENSE) and [CenterFace](https://github.com/Star-Clouds/CenterFace/blob/master/LICENSE). Finally, DeepFace optionally uses [face anti spoofing](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/LICENSE) to determine whether given images are real or fake. License types will be inherited when you utilize those models, so please check the license types of those models for production purposes.
+
+ DeepFace [logo](https://thenounproject.com/term/face-recognition/2965879/) was created by [Adrien Coquet](https://thenounproject.com/coquet_adrien/) and is licensed under the [Creative Commons: By Attribution 3.0 License](https://creativecommons.org/licenses/by/3.0/).
deepface/benchmarks/Evaluate-Results.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
deepface/benchmarks/Perform-Experiments.ipynb ADDED
@@ -0,0 +1,352 @@
+ {
+  "cells": [
+   {
+    "cell_type": "markdown",
+    "id": "8133a99d",
+    "metadata": {},
+    "source": [
+     "# Perform Experiments with DeepFace on LFW dataset"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 11,
+    "id": "5aab0cbe",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# built-in dependencies\n",
+     "import os\n",
+     "\n",
+     "# 3rd party dependencies\n",
+     "import numpy as np\n",
+     "import pandas as pd\n",
+     "from tqdm import tqdm\n",
+     "import matplotlib.pyplot as plt\n",
+     "from sklearn.metrics import accuracy_score\n",
+     "from sklearn.datasets import fetch_lfw_pairs\n",
+     "from deepface import DeepFace"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "id": "64c9ed9a",
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "This experiment is done with pip package of deepface with 0.0.90 version\n"
+      ]
+     }
+    ],
+    "source": [
+     "print(f\"This experiment is done with pip package of deepface with {DeepFace.__version__} version\")"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "id": "feaec973",
+    "metadata": {},
+    "source": [
+     "### Configuration Sets"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "id": "453104b4",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# all configuration alternatives for 4 dimensions of arguments\n",
+     "alignment = [True, False]\n",
+     "models = [\"Facenet512\", \"Facenet\", \"VGG-Face\", \"ArcFace\", \"Dlib\", \"GhostFaceNet\", \"SFace\", \"OpenFace\", \"DeepFace\", \"DeepID\"]\n",
+     "detectors = [\"retinaface\", \"mtcnn\", \"fastmtcnn\", \"dlib\", \"yolov8\", \"yunet\", \"centerface\", \"mediapipe\", \"ssd\", \"opencv\", \"skip\"]\n",
+     "metrics = [\"euclidean\", \"euclidean_l2\", \"cosine\"]\n",
+     "expand_percentage = 0"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "id": "c9aeb57a",
+    "metadata": {},
+    "source": [
+     "### Create Required Folders if necessary"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "id": "671d8a00",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "target_paths = [\"lfwe\", \"dataset\", \"outputs\", \"outputs/test\", \"results\"]\n",
+     "for target_path in target_paths:\n",
+     "    if not os.path.exists(target_path):\n",
+     "        os.mkdir(target_path)\n",
+     "        print(f\"{target_path} is just created\")"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "id": "fc31f03a",
+    "metadata": {},
+    "source": [
+     "### Load LFW Dataset"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 5,
+    "id": "721a7d70",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "pairs_touch = \"outputs/test_lfwe.txt\"\n",
+     "instances = 1000  # pairs.shape[0]"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 6,
+    "id": "010184d8",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "target_path = \"dataset/test_lfw.npy\"\n",
+     "labels_path = \"dataset/test_labels.npy\"\n",
+     "\n",
+     "if not os.path.exists(target_path):\n",
+     "    # store the result under a new name to avoid shadowing sklearn's fetch_lfw_pairs function\n",
+     "    lfw_pairs = fetch_lfw_pairs(\n",
+     "        subset=\"test\", color=True, resize=2, funneled=False, slice_=None\n",
+     "    )\n",
+     "    pairs = lfw_pairs.pairs\n",
+     "    labels = lfw_pairs.target\n",
+     "    target_names = lfw_pairs.target_names\n",
+     "    np.save(target_path, pairs)\n",
+     "    np.save(labels_path, labels)\n",
+     "else:\n",
+     "    if not os.path.exists(pairs_touch):\n",
+     "        # loading pairs takes some time, but once the pairs are extracted as images there is no need to load them anymore\n",
+     "        pairs = np.load(target_path)\n",
+     "    labels = np.load(labels_path)"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "id": "005f582e",
+    "metadata": {},
+    "source": [
+     "### Save LFW image pairs into file system"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 7,
+    "id": "5bc23313",
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "100%|██████████| 1000/1000 [00:00<00:00, 190546.25it/s]\n"
+      ]
+     }
+    ],
+    "source": [
+     "for i in tqdm(range(0, instances)):\n",
+     "    img1_target = f\"lfwe/test/{i}_1.jpg\"\n",
+     "    img2_target = f\"lfwe/test/{i}_2.jpg\"\n",
+     "\n",
+     "    if not os.path.exists(img1_target):\n",
+     "        img1 = pairs[i][0]\n",
+     "        # plt.imsave(img1_target, img1 / 255)  # works for my mac\n",
+     "        plt.imsave(img1_target, img1)  # works for my debian\n",
+     "\n",
+     "    if not os.path.exists(img2_target):\n",
+     "        img2 = pairs[i][1]\n",
+     "        # plt.imsave(img2_target, img2 / 255)  # works for my mac\n",
+     "        plt.imsave(img2_target, img2)  # works for my debian\n",
+     "\n",
+     "if not os.path.exists(pairs_touch):\n",
+     "    open(pairs_touch, \"a\").close()"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "id": "6f8fa8fa",
+    "metadata": {},
+    "source": [
+     "### Perform Experiments\n",
+     "\n",
+     "This block will save the experiment results in the outputs folder"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 8,
+    "id": "e7fba936",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "for model_name in models:\n",
+     "    for detector_backend in detectors:\n",
+     "        for distance_metric in metrics:\n",
+     "            for align in alignment:\n",
+     "\n",
+     "                if detector_backend == \"skip\" and align is True:\n",
+     "                    # alignment is not possible for a skipped detector configuration\n",
+     "                    continue\n",
+     "\n",
+     "                alignment_text = \"aligned\" if align is True else \"unaligned\"\n",
+     "                task = f\"{model_name}_{detector_backend}_{distance_metric}_{alignment_text}\"\n",
+     "                output_file = f\"outputs/test/{task}.csv\"\n",
+     "                if os.path.exists(output_file):\n",
+     "                    # print(f\"{output_file} is available already\")\n",
+     "                    continue\n",
+     "\n",
+     "                distances = []\n",
+     "                for i in tqdm(range(0, instances), desc=task):\n",
+     "                    img1_target = f\"lfwe/test/{i}_1.jpg\"\n",
+     "                    img2_target = f\"lfwe/test/{i}_2.jpg\"\n",
+     "                    result = DeepFace.verify(\n",
+     "                        img1_path=img1_target,\n",
+     "                        img2_path=img2_target,\n",
+     "                        model_name=model_name,\n",
+     "                        detector_backend=detector_backend,\n",
+     "                        distance_metric=distance_metric,\n",
+     "                        align=align,\n",
+     "                        enforce_detection=False,\n",
+     "                        expand_percentage=expand_percentage,\n",
+     "                    )\n",
+     "                    distance = result[\"distance\"]\n",
+     "                    distances.append(distance)\n",
+     "                # -----------------------------------\n",
+     "                df = pd.DataFrame(list(labels), columns=[\"actuals\"])\n",
+     "                df[\"distances\"] = distances\n",
+     "                df.to_csv(output_file, index=False)"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "id": "a0b8dafa",
+    "metadata": {},
+    "source": [
+     "### Calculate Results\n",
+     "\n",
+     "The experiments were responsible for calculating distances. We will calculate the best accuracy scores in this block."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 9,
+    "id": "67376e76",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "data = [[0 for _ in range(len(models))] for _ in range(len(detectors))]\n",
+     "base_df = pd.DataFrame(data, columns=models, index=detectors)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 10,
+    "id": "f2cc536b",
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "results/pivot_euclidean_with_alignment_True.csv saved\n",
+       "results/pivot_euclidean_l2_with_alignment_True.csv saved\n",
+       "results/pivot_cosine_with_alignment_True.csv saved\n",
+       "results/pivot_euclidean_with_alignment_False.csv saved\n",
+       "results/pivot_euclidean_l2_with_alignment_False.csv saved\n",
+       "results/pivot_cosine_with_alignment_False.csv saved\n"
+      ]
+     }
+    ],
+    "source": [
+     "for is_aligned in alignment:\n",
+     "    for distance_metric in metrics:\n",
+     "\n",
+     "        current_df = base_df.copy()\n",
+     "\n",
+     "        target_file = f\"results/pivot_{distance_metric}_with_alignment_{is_aligned}.csv\"\n",
+     "        if os.path.exists(target_file):\n",
+     "            continue\n",
+     "\n",
+     "        for model_name in models:\n",
+     "            for detector_backend in detectors:\n",
+     "\n",
+     "                align = \"aligned\" if is_aligned is True else \"unaligned\"\n",
+     "\n",
+     "                if detector_backend == \"skip\" and is_aligned is True:\n",
+     "                    # alignment is not possible for a skipped detector configuration\n",
+     "                    align = \"unaligned\"\n",
+     "\n",
+     "                source_file = f\"outputs/test/{model_name}_{detector_backend}_{distance_metric}_{align}.csv\"\n",
+     "                df = pd.read_csv(source_file)\n",
+     "\n",
+     "                positive_mean = df[(df[\"actuals\"] == True) | (df[\"actuals\"] == 1)][\"distances\"].mean()\n",
+     "                negative_mean = df[(df[\"actuals\"] == False) | (df[\"actuals\"] == 0)][\"distances\"].mean()\n",
+     "\n",
+     "                distances = sorted(df[\"distances\"].values.tolist())\n",
+     "\n",
+     "                items = []\n",
+     "                for i, distance in enumerate(distances):\n",
+     "                    if distance >= positive_mean and distance <= negative_mean:\n",
+     "                        sandbox_df = df.copy()\n",
+     "                        sandbox_df[\"predictions\"] = False\n",
+     "                        idx = sandbox_df[sandbox_df[\"distances\"] < distance].index\n",
+     "                        sandbox_df.loc[idx, \"predictions\"] = True\n",
+     "\n",
+     "                        actuals = sandbox_df.actuals.values.tolist()\n",
+     "                        predictions = sandbox_df.predictions.values.tolist()\n",
+     "                        accuracy = 100 * accuracy_score(actuals, predictions)\n",
+     "                        items.append((distance, accuracy))\n",
+     "\n",
+     "                pivot_df = pd.DataFrame(items, columns=[\"distance\", \"accuracy\"])\n",
+     "                pivot_df = pivot_df.sort_values(by=[\"accuracy\"], ascending=False)\n",
+     "                threshold = pivot_df.iloc[0][\"distance\"]\n",
+     "                # print(f\"threshold for {model_name}/{detector_backend} is {threshold}\")\n",
+     "                accuracy = pivot_df.iloc[0][\"accuracy\"]\n",
+     "\n",
+     "                # print(source_file, round(accuracy, 1))\n",
+     "                current_df.at[detector_backend, model_name] = round(accuracy, 1)\n",
+     "\n",
+     "        current_df.to_csv(target_file)\n",
+     "        print(f\"{target_file} saved\")"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3 (ipykernel)",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.9.16"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
deepface/benchmarks/README.md ADDED
@@ -0,0 +1,134 @@
+ # Benchmarks
+
+ [`🎥 Video Tutorial`](https://youtu.be/eKOZawGR3y0)
+
+ DeepFace offers various configurations that significantly impact accuracy, including the facial recognition model, face detector model, distance metric, and alignment mode. Our experiments conducted on the [LFW dataset](https://sefiks.com/2020/08/27/labeled-faces-in-the-wild-for-face-recognition/) using different combinations of these configurations yield the following results.
+
+ You can reproduce the results by executing the `Perform-Experiments.ipynb` and `Evaluate-Results.ipynb` notebooks in that order.
+
+ ## ROC Curves
+
+ ROC curves provide a valuable means of evaluating the performance of different models on a broader scale. The following illustration shows ROC curves for different facial recognition models alongside their optimal configurations yielding the highest accuracy scores.
+
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/benchmarks.jpg" width="95%" height="95%"></p>
+
+ In summary, FaceNet-512d surpasses human-level accuracy, while FaceNet-128d reaches it. Dlib, VGG-Face and ArcFace trail closely but slightly below; GhostFaceNet and SFace make notable contributions despite not leading; and OpenFace, DeepFace and DeepID exhibit lower performance.
+
+ ## Accuracy Scores
+
+ Please note that humans achieve a 97.5% accuracy score on the same dataset. Configurations that outperform this benchmark are highlighted in bold.
+
+ ## Performance Matrix for euclidean while alignment is True
+
+ | Detector | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
+ | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+ | retinaface |95.9 |93.5 |95.8 |85.2 |88.9 |85.9 |80.2 |69.4 |67.0 |65.6 |
+ | mtcnn |95.2 |93.8 |95.9 |83.7 |89.4 |83.0 |77.4 |70.2 |66.5 |63.3 |
+ | fastmtcnn |96.0 |93.4 |95.8 |83.5 |91.1 |82.8 |77.7 |69.4 |66.7 |64.0 |
+ | dlib |96.0 |90.8 |94.5 |88.6 |96.8 |65.7 |66.3 |75.8 |63.4 |60.4 |
+ | yolov8 |94.4 |91.9 |95.0 |84.1 |89.2 |77.6 |73.4 |68.7 |69.0 |66.5 |
+ | yunet |97.3 |96.1 |96.0 |84.9 |92.2 |84.0 |79.4 |70.9 |65.8 |65.2 |
+ | centerface |**97.6** |95.8 |95.7 |83.6 |90.4 |82.8 |77.4 |68.9 |65.5 |62.8 |
+ | mediapipe |95.1 |88.6 |92.9 |73.2 |93.1 |63.2 |72.5 |78.7 |61.8 |62.2 |
+ | ssd |88.9 |85.6 |87.0 |75.8 |83.1 |79.1 |76.9 |66.8 |63.4 |62.5 |
+ | opencv |88.2 |84.2 |87.3 |73.0 |84.4 |83.8 |81.1 |66.4 |65.5 |59.6 |
+ | skip |92.0 |64.1 |90.6 |56.6 |69.0 |75.1 |81.4 |57.4 |60.8 |60.7 |
+
+ ## Performance Matrix for euclidean while alignment is False
+
+ | Detector | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
+ | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+ | retinaface |96.1 |92.8 |95.7 |84.1 |88.3 |83.2 |78.6 |70.8 |67.4 |64.3 |
+ | mtcnn |95.9 |92.5 |95.5 |81.8 |89.3 |83.2 |76.3 |70.9 |65.9 |63.2 |
+ | fastmtcnn |96.3 |93.0 |96.0 |82.2 |90.0 |82.7 |76.8 |71.2 |66.5 |64.3 |
+ | dlib |96.0 |89.0 |94.1 |82.6 |96.3 |65.6 |73.1 |75.9 |61.8 |61.9 |
+ | yolov8 |94.8 |90.8 |95.2 |83.2 |88.4 |77.6 |71.6 |68.9 |68.2 |66.3 |
+ | yunet |**97.9** |96.5 |96.3 |84.1 |91.4 |82.7 |78.2 |71.7 |65.5 |65.2 |
+ | centerface |97.4 |95.4 |95.8 |83.2 |90.3 |82.0 |76.5 |69.9 |65.7 |62.9 |
+ | mediapipe |94.9 |87.1 |93.1 |71.1 |91.9 |61.9 |73.2 |77.6 |61.7 |62.4 |
+ | ssd |97.2 |94.9 |96.7 |83.9 |88.6 |84.9 |82.0 |69.9 |66.7 |64.0 |
+ | opencv |94.1 |90.2 |95.8 |89.8 |91.2 |91.0 |86.9 |71.1 |68.4 |61.1 |
+ | skip |92.0 |64.1 |90.6 |56.6 |69.0 |75.1 |81.4 |57.4 |60.8 |60.7 |
+
+ ## Performance Matrix for euclidean_l2 while alignment is True
+
+ | Detector | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
+ | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+ | retinaface |**98.4** |96.4 |95.8 |96.6 |89.1 |90.5 |92.4 |69.4 |67.7 |64.4 |
+ | mtcnn |**97.6** |96.8 |95.9 |96.0 |90.0 |89.8 |90.5 |70.2 |66.4 |64.0 |
+ | fastmtcnn |**98.1** |97.2 |95.8 |96.4 |91.0 |89.5 |90.0 |69.4 |67.4 |64.1 |
+ | dlib |97.0 |92.6 |94.5 |95.1 |96.4 |63.3 |69.8 |75.8 |66.5 |59.5 |
+ | yolov8 |97.3 |95.7 |95.0 |95.5 |88.8 |88.9 |91.9 |68.7 |67.5 |66.0 |
+ | yunet |**97.9** |97.4 |96.0 |96.7 |91.6 |89.1 |91.0 |70.9 |66.5 |63.6 |
+ | centerface |**97.7** |96.8 |95.7 |96.5 |90.9 |87.5 |89.3 |68.9 |67.8 |64.0 |
+ | mediapipe |96.1 |90.6 |92.9 |90.3 |92.6 |64.4 |75.4 |78.7 |64.7 |63.0 |
+ | ssd |88.7 |87.5 |87.0 |86.2 |83.3 |82.2 |84.6 |66.8 |64.1 |62.6 |
+ | opencv |87.6 |84.8 |87.3 |84.6 |84.0 |85.0 |83.6 |66.4 |63.8 |60.9 |
+ | skip |91.4 |67.6 |90.6 |57.2 |69.3 |78.4 |83.4 |57.4 |62.6 |61.6 |
+
+ ## Performance Matrix for euclidean_l2 while alignment is False
+
+ | Detector | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
+ | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+ | retinaface |**98.0** |95.9 |95.7 |95.7 |88.4 |89.5 |90.6 |70.8 |67.7 |64.6 |
+ | mtcnn |**97.8** |96.2 |95.5 |95.9 |89.2 |88.0 |91.1 |70.9 |67.0 |64.0 |
+ | fastmtcnn |**97.7** |96.6 |96.0 |95.9 |89.6 |87.8 |89.7 |71.2 |67.8 |64.2 |
+ | dlib |96.5 |89.9 |94.1 |93.8 |95.6 |63.0 |75.0 |75.9 |62.6 |61.8 |
+ | yolov8 |**97.7** |95.8 |95.2 |95.0 |88.1 |88.7 |89.8 |68.9 |68.9 |65.3 |
+ | yunet |**98.3** |96.8 |96.3 |96.1 |91.7 |88.0 |90.5 |71.7 |67.6 |63.2 |
+ | centerface |97.4 |96.3 |95.8 |95.8 |90.2 |86.8 |89.3 |69.9 |68.4 |63.1 |
+ | mediapipe |96.3 |90.0 |93.1 |89.3 |91.8 |65.6 |74.6 |77.6 |64.9 |61.6 |
+ | ssd |**97.9** |97.0 |96.7 |96.6 |89.4 |91.5 |93.0 |69.9 |68.7 |64.9 |
+ | opencv |96.2 |92.9 |95.8 |93.2 |91.5 |93.3 |91.7 |71.1 |68.3 |61.6 |
+ | skip |91.4 |67.6 |90.6 |57.2 |69.3 |78.4 |83.4 |57.4 |62.6 |61.6 |
+
+ ## Performance Matrix for cosine while alignment is True
+
+ | Detector | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
+ | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+ | retinaface |**98.4** |96.4 |95.8 |96.6 |89.1 |90.5 |92.4 |69.4 |67.7 |64.4 |
+ | mtcnn |**97.6** |96.8 |95.9 |96.0 |90.0 |89.8 |90.5 |70.2 |66.3 |63.0 |
+ | fastmtcnn |**98.1** |97.2 |95.8 |96.4 |91.0 |89.5 |90.0 |69.4 |67.4 |63.6 |
+ | dlib |97.0 |92.6 |94.5 |95.1 |96.4 |63.3 |69.8 |75.8 |66.5 |58.7 |
+ | yolov8 |97.3 |95.7 |95.0 |95.5 |88.8 |88.9 |91.9 |68.7 |67.5 |65.9 |
+ | yunet |**97.9** |97.4 |96.0 |96.7 |91.6 |89.1 |91.0 |70.9 |66.5 |63.5 |
+ | centerface |**97.7** |96.8 |95.7 |96.5 |90.9 |87.5 |89.3 |68.9 |67.8 |63.6 |
+ | mediapipe |96.1 |90.6 |92.9 |90.3 |92.6 |64.3 |75.4 |78.7 |64.8 |63.0 |
+ | ssd |88.7 |87.5 |87.0 |86.2 |83.3 |82.2 |84.5 |66.8 |63.8 |62.6 |
+ | opencv |87.6 |84.9 |87.2 |84.6 |84.0 |85.0 |83.6 |66.2 |63.7 |60.1 |
+ | skip |91.4 |67.6 |90.6 |54.8 |69.3 |78.4 |83.4 |57.4 |62.6 |61.1 |
+
+ ## Performance Matrix for cosine while alignment is False
+
+ | Detector | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
+ | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+ | retinaface |**98.0** |95.9 |95.7 |95.7 |88.4 |89.5 |90.6 |70.8 |67.7 |63.7 |
+ | mtcnn |**97.8** |96.2 |95.5 |95.9 |89.2 |88.0 |91.1 |70.9 |67.0 |64.0 |
+ | fastmtcnn |**97.7** |96.6 |96.0 |95.9 |89.6 |87.8 |89.7 |71.2 |67.8 |62.7 |
+ | dlib |96.5 |89.9 |94.1 |93.8 |95.6 |63.0 |75.0 |75.9 |62.6 |61.7 |
+ | yolov8 |**97.7** |95.8 |95.2 |95.0 |88.1 |88.7 |89.8 |68.9 |68.9 |65.3 |
+ | yunet |**98.3** |96.8 |96.3 |96.1 |91.7 |88.0 |90.5 |71.7 |67.6 |63.2 |
+ | centerface |97.4 |96.3 |95.8 |95.8 |90.2 |86.8 |89.3 |69.9 |68.4 |62.6 |
+ | mediapipe |96.3 |90.0 |93.1 |89.3 |91.8 |64.8 |74.6 |77.6 |64.9 |61.6 |
+ | ssd |**97.9** |97.0 |96.7 |96.6 |89.4 |91.5 |93.0 |69.9 |68.7 |63.8 |
+ | opencv |96.2 |92.9 |95.8 |93.2 |91.5 |93.3 |91.7 |71.1 |68.1 |61.1 |
+ | skip |91.4 |67.6 |90.6 |54.8 |69.3 |78.4 |83.4 |57.4 |62.6 |61.1 |
+
+ # Citation
+
+ Please cite deepface in your publications if it helps your research - see [`CITATIONS`](https://github.com/serengil/deepface/blob/master/CITATION.md) for more details. Here is its BibTeX entry:
+
+ ```BibTeX
+ @article{serengil2024lightface,
+   title     = {A Benchmark of Facial Recognition Pipelines and Co-Usability Performances of Modules},
+   author    = {Serengil, Sefik Ilkin and Ozpinar, Alper},
+   journal   = {Bilisim Teknolojileri Dergisi},
+   volume    = {17},
+   number    = {2},
+   pages     = {95-107},
+   year      = {2024},
+   doi       = {10.17671/gazibtd.1399077},
+   url       = {https://dergipark.org.tr/en/pub/gazibtd/issue/84331/1399077},
+   publisher = {Gazi University}
+ }
+ ```
deepface/deepface/DeepFace.py ADDED
@@ -0,0 +1,615 @@
1
+ # common dependencies
2
+ import os
3
+ import warnings
4
+ import logging
5
+ from typing import Any, Dict, List, Union, Optional
6
+
7
+ # this has to be set before importing tensorflow
8
+ os.environ["TF_USE_LEGACY_KERAS"] = "1"
9
+
10
+ # pylint: disable=wrong-import-position
11
+
12
+ # 3rd party dependencies
13
+ import numpy as np
14
+ import pandas as pd
15
+ import tensorflow as tf
16
+
17
+ # package dependencies
18
+ from deepface.commons import package_utils, folder_utils
19
+ from deepface.commons.logger import Logger
20
+ from deepface.modules import (
21
+ modeling,
22
+ representation,
23
+ verification,
24
+ recognition,
25
+ demography,
26
+ detection,
27
+ streaming,
28
+ preprocessing,
29
+ )
30
+ from deepface import __version__
31
+
32
+ logger = Logger()
33
+
34
+ # -----------------------------------
35
+ # configurations for dependencies
36
+
37
+ # users should install tf_keras package if they are using tf 2.16 or later versions
38
+ package_utils.validate_for_keras3()
39
+
40
+ warnings.filterwarnings("ignore")
41
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
42
+ tf_version = package_utils.get_tf_major_version()
43
+ if tf_version == 2:
44
+ tf.get_logger().setLevel(logging.ERROR)
45
+ # -----------------------------------
46
+
47
+ # create required folders if necessary to store model weights
48
+ folder_utils.initialize_folder()
49
+
50
+
51
+ def build_model(model_name: str, task: str = "facial_recognition") -> Any:
52
+ """
53
+ This function builds a pre-trained model
54
+ Args:
55
+ model_name (str): model identifier
56
+ - VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib,
57
+ ArcFace, SFace, GhostFaceNet for face recognition
58
+ - Age, Gender, Emotion, Race for facial attributes
59
+ - opencv, mtcnn, ssd, dlib, retinaface, mediapipe, yolov8, yunet,
60
+ fastmtcnn or centerface for face detectors
61
+ - Fasnet for spoofing
62
+ task (str): facial_recognition, facial_attribute, face_detector, spoofing
63
+ default is facial_recognition
64
+ Returns:
65
+ built_model
66
+ """
67
+ return modeling.build_model(task=task, model_name=model_name)
68
+
69
+
70
+ def verify(
71
+ img1_path: Union[str, np.ndarray, List[float]],
72
+ img2_path: Union[str, np.ndarray, List[float]],
73
+ model_name: str = "VGG-Face",
74
+ detector_backend: str = "opencv",
75
+ distance_metric: str = "cosine",
76
+ enforce_detection: bool = True,
77
+ align: bool = True,
78
+ expand_percentage: int = 0,
79
+ normalization: str = "base",
80
+ silent: bool = False,
81
+ threshold: Optional[float] = None,
82
+ anti_spoofing: bool = False,
83
+ ) -> Dict[str, Any]:
84
+ """
85
+ Verify if an image pair represents the same person or different persons.
86
+ Args:
87
+ img1_path (str or np.ndarray or List[float]): Path to the first image.
88
+ Accepts exact image path as a string, numpy array (BGR), base64 encoded images
89
+ or pre-calculated embeddings.
90
+
91
+ img2_path (str or np.ndarray or List[float]): Path to the second image.
92
+ Accepts exact image path as a string, numpy array (BGR), base64 encoded images
93
+ or pre-calculated embeddings.
94
+
95
+ model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
96
+ OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
97
+
98
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
99
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
100
+ (default is opencv).
101
+
102
+ distance_metric (string): Metric for measuring similarity. Options: 'cosine',
103
+ 'euclidean', 'euclidean_l2' (default is cosine).
104
+
105
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
106
+ Set to False to avoid the exception for low-resolution images (default is True).
107
+
108
+ align (bool): Flag to enable face alignment (default is True).
109
+
110
+ expand_percentage (int): expand detected facial area with a percentage (default is 0).
111
+
112
+ normalization (string): Normalize the input image before feeding it to the model.
113
+ Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace (default is base)
114
+
115
+ silent (boolean): Suppress or allow some log messages for a quieter analysis process
116
+ (default is False).
117
+
118
+ threshold (float): Specify a threshold to determine whether a pair represents the same
119
+ person or different individuals. This threshold is used for comparing distances.
120
+ If left unset, default pre-tuned threshold values will be applied based on the specified
121
+ model name and distance metric (default is None).
122
+
123
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
124
+
125
+ Returns:
126
+ result (dict): A dictionary containing verification results with following keys.
127
+
128
+ - 'verified' (bool): Indicates whether the images represent the same person (True)
129
+ or different persons (False).
130
+
131
+ - 'distance' (float): The distance measure between the face vectors.
132
+ A lower distance indicates higher similarity.
133
+
134
+ - 'threshold' (float): The maximum threshold used for verification.
135
+ If the distance is below this threshold, the images are considered a match.
136
+
137
+ - 'model' (str): The chosen face recognition model.
138
+
139
+ - 'distance_metric' (str): The chosen similarity metric for measuring distances.
140
+
141
+ - 'facial_areas' (dict): Rectangular regions of interest for faces in both images.
142
+ - 'img1': {'x': int, 'y': int, 'w': int, 'h': int}
143
+ Region of interest for the first image.
144
+ - 'img2': {'x': int, 'y': int, 'w': int, 'h': int}
145
+ Region of interest for the second image.
146
+
147
+ - 'time' (float): Time taken for the verification process in seconds.
148
+ """
149
+
150
+ return verification.verify(
151
+ img1_path=img1_path,
152
+ img2_path=img2_path,
153
+ model_name=model_name,
154
+ detector_backend=detector_backend,
155
+ distance_metric=distance_metric,
156
+ enforce_detection=enforce_detection,
157
+ align=align,
158
+ expand_percentage=expand_percentage,
159
+ normalization=normalization,
160
+ silent=silent,
161
+ threshold=threshold,
162
+ anti_spoofing=anti_spoofing,
163
+ )
164
+
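+ # Editor's illustrative sketch (not part of the uploaded file): a minimal
+ # verify() call, assuming two local images img1.jpg and img2.jpg exist.
+ #
+ #     from deepface import DeepFace
+ #     result = DeepFace.verify(img1_path="img1.jpg", img2_path="img2.jpg")
+ #     # 'verified' is True when 'distance' falls below 'threshold'
+ #     print(result["verified"], result["distance"], result["threshold"])
+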
165
+
166
+ def analyze(
167
+ img_path: Union[str, np.ndarray],
168
+ actions: Union[tuple, list] = ("emotion", "age", "gender", "race"),
169
+ enforce_detection: bool = True,
170
+ detector_backend: str = "opencv",
171
+ align: bool = True,
172
+ expand_percentage: int = 0,
173
+ silent: bool = False,
174
+ anti_spoofing: bool = False,
175
+ ) -> List[Dict[str, Any]]:
176
+ """
177
+ Analyze facial attributes such as age, gender, emotion, and race in the provided image.
178
+ Args:
179
+ img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
180
+ or a base64 encoded image. If the source image contains multiple faces, the result will
181
+ include information for each detected face.
182
+
183
+ actions (tuple): Attributes to analyze. The default is ('age', 'gender', 'emotion', 'race').
184
+ You can exclude some of these attributes from the analysis if needed.
185
+
186
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
187
+ Set to False to avoid the exception for low-resolution images (default is True).
188
+
189
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
190
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
191
+ (default is opencv).
192
+
196
+ align (boolean): Perform alignment based on the eye positions (default is True).
197
+
198
+ expand_percentage (int): expand detected facial area with a percentage (default is 0).
199
+
200
+ silent (boolean): Suppress or allow some log messages for a quieter analysis process
201
+ (default is False).
202
+
203
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
204
+
205
+ Returns:
206
+ results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary represents
207
+ the analysis results for a detected face. Each dictionary in the list contains the
208
+ following keys:
209
+
210
+ - 'region' (dict): Represents the rectangular region of the detected face in the image.
211
+ - 'x': x-coordinate of the top-left corner of the face.
212
+ - 'y': y-coordinate of the top-left corner of the face.
213
+ - 'w': Width of the detected face region.
214
+ - 'h': Height of the detected face region.
215
+
216
+ - 'age' (float): Estimated age of the detected face.
217
+
218
+ - 'face_confidence' (float): Confidence score for the detected face.
219
+ Indicates the reliability of the face detection.
220
+
221
+ - 'dominant_gender' (str): The dominant gender in the detected face.
222
+ Either "Man" or "Woman".
223
+
224
+ - 'gender' (dict): Confidence scores for each gender category.
225
+ - 'Man': Confidence score for the male gender.
226
+ - 'Woman': Confidence score for the female gender.
227
+
228
+ - 'dominant_emotion' (str): The dominant emotion in the detected face.
229
+ Possible values include "sad," "angry," "surprise," "fear," "happy,"
230
+ "disgust," and "neutral"
231
+
232
+ - 'emotion' (dict): Confidence scores for each emotion category.
233
+ - 'sad': Confidence score for sadness.
234
+ - 'angry': Confidence score for anger.
235
+ - 'surprise': Confidence score for surprise.
236
+ - 'fear': Confidence score for fear.
237
+ - 'happy': Confidence score for happiness.
238
+ - 'disgust': Confidence score for disgust.
239
+ - 'neutral': Confidence score for neutrality.
240
+
241
+ - 'dominant_race' (str): The dominant race in the detected face.
242
+ Possible values include "indian," "asian," "latino hispanic,"
243
+ "black," "middle eastern," and "white."
244
+
245
+ - 'race' (dict): Confidence scores for each race category.
246
+ - 'indian': Confidence score for Indian ethnicity.
247
+ - 'asian': Confidence score for Asian ethnicity.
248
+ - 'latino hispanic': Confidence score for Latino/Hispanic ethnicity.
249
+ - 'black': Confidence score for Black ethnicity.
250
+ - 'middle eastern': Confidence score for Middle Eastern ethnicity.
251
+ - 'white': Confidence score for White ethnicity.
252
+ """
253
+ return demography.analyze(
254
+ img_path=img_path,
255
+ actions=actions,
256
+ enforce_detection=enforce_detection,
257
+ detector_backend=detector_backend,
258
+ align=align,
259
+ expand_percentage=expand_percentage,
260
+ silent=silent,
261
+ anti_spoofing=anti_spoofing,
262
+ )
263
+
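+ # Editor's illustrative sketch (not part of the uploaded file): analyzing only
+ # a subset of attributes, assuming a local image img1.jpg exists.
+ #
+ #     from deepface import DeepFace
+ #     objs = DeepFace.analyze(img_path="img1.jpg", actions=("age", "emotion"))
+ #     for obj in objs:  # one dictionary per detected face
+ #         print(obj["age"], obj["dominant_emotion"], obj["region"])
+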
264
+
265
+ def find(
266
+ img_path: Union[str, np.ndarray],
267
+ db_path: str,
268
+ model_name: str = "VGG-Face",
269
+ distance_metric: str = "cosine",
270
+ enforce_detection: bool = True,
271
+ detector_backend: str = "opencv",
272
+ align: bool = True,
273
+ expand_percentage: int = 0,
274
+ threshold: Optional[float] = None,
275
+ normalization: str = "base",
276
+ silent: bool = False,
277
+ refresh_database: bool = True,
278
+ anti_spoofing: bool = False,
279
+ ) -> List[pd.DataFrame]:
280
+ """
281
+ Identify individuals in a database
282
+ Args:
283
+ img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
284
+ or a base64 encoded image. If the source image contains multiple faces, the result will
285
+ include information for each detected face.
286
+
287
+ db_path (string): Path to the folder containing image files. All detected faces
288
+ in the database will be considered in the decision-making process.
289
+
290
+ model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
291
+ OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
292
+
293
+ distance_metric (string): Metric for measuring similarity. Options: 'cosine',
294
+ 'euclidean', 'euclidean_l2' (default is cosine).
295
+
296
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
297
+ Set to False to avoid the exception for low-resolution images (default is True).
298
+
299
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
300
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
301
+ (default is opencv).
302
+
303
+ align (boolean): Perform alignment based on the eye positions (default is True).
304
+
305
+ expand_percentage (int): expand detected facial area with a percentage (default is 0).
306
+
307
+ threshold (float): Specify a threshold to determine whether a pair represents the same
308
+ person or different individuals. This threshold is used for comparing distances.
309
+ If left unset, default pre-tuned threshold values will be applied based on the specified
310
+ model name and distance metric (default is None).
311
+
312
+ normalization (string): Normalize the input image before feeding it to the model.
313
+ Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace (default is base).
314
+
315
+ silent (boolean): Suppress or allow some log messages for a quieter analysis process
316
+ (default is False).
317
+
318
+ refresh_database (boolean): Synchronizes the image representations (pkl) file with the
319
+ directory/db files. If set to False, any file changes inside db_path are ignored
320
+ (default is True).
321
+
322
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
323
+
324
+ Returns:
325
+ results (List[pd.DataFrame]): A list of pandas dataframes. Each dataframe corresponds
326
+ to the identity information for an individual detected in the source image.
327
+ The DataFrame columns include:
328
+
329
+ - 'identity': Identity label of the detected individual.
330
+
331
+ - 'target_x', 'target_y', 'target_w', 'target_h': Bounding box coordinates of the
332
+ target face in the database.
333
+
334
+ - 'source_x', 'source_y', 'source_w', 'source_h': Bounding box coordinates of the
335
+ detected face in the source image.
336
+
337
+ - 'threshold': threshold used to decide whether a pair depicts the same person or different persons
338
+
339
+ - 'distance': Distance between the faces based on the
340
+ specified model and distance metric (lower values indicate higher similarity)
341
+ """
342
+ return recognition.find(
343
+ img_path=img_path,
344
+ db_path=db_path,
345
+ model_name=model_name,
346
+ distance_metric=distance_metric,
347
+ enforce_detection=enforce_detection,
348
+ detector_backend=detector_backend,
349
+ align=align,
350
+ expand_percentage=expand_percentage,
351
+ threshold=threshold,
352
+ normalization=normalization,
353
+ silent=silent,
354
+ refresh_database=refresh_database,
355
+ anti_spoofing=anti_spoofing,
356
+ )
357
+
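+ # Editor's illustrative sketch (not part of the uploaded file): searching a
+ # face database, assuming my_db/ is a folder of reference images.
+ #
+ #     from deepface import DeepFace
+ #     dfs = DeepFace.find(img_path="img1.jpg", db_path="my_db")
+ #     # one dataframe per face detected in the source image
+ #     print(dfs[0][["identity", "distance", "threshold"]])
+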
358
+
359
+ def represent(
360
+ img_path: Union[str, np.ndarray],
361
+ model_name: str = "VGG-Face",
362
+ enforce_detection: bool = True,
363
+ detector_backend: str = "opencv",
364
+ align: bool = True,
365
+ expand_percentage: int = 0,
366
+ normalization: str = "base",
367
+ anti_spoofing: bool = False,
368
+ max_faces: Optional[int] = None,
369
+ ) -> List[Dict[str, Any]]:
370
+ """
371
+ Represent facial images as multi-dimensional vector embeddings.
372
+
373
+ Args:
374
+ img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
375
+ or a base64 encoded image. If the source image contains multiple faces, the result will
376
+ include information for each detected face.
377
+
378
+ model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
379
+ OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet
380
+ (default is VGG-Face).
381
+
382
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
383
+ Set to False to avoid the exception for low-resolution images
384
+ (default is True).
385
+
386
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
387
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
388
+ (default is opencv).
389
+
390
+ align (boolean): Perform alignment based on the eye positions (default is True).
391
+
392
+ expand_percentage (int): expand detected facial area with a percentage (default is 0).
393
+
394
+ normalization (string): Normalize the input image before feeding it to the model.
395
+ Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace
396
+ (default is base).
397
+
398
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
399
+
400
+ max_faces (int): Set a limit on the number of faces to be processed (default is None).
401
+
402
+ Returns:
403
+ results (List[Dict[str, Any]]): A list of dictionaries, each containing the
404
+ following fields:
405
+
406
+ - embedding (List[float]): Multidimensional vector representing facial features.
407
+ The number of dimensions varies based on the reference model
408
+ (e.g., FaceNet returns 128 dimensions, VGG-Face returns 4096 dimensions).
409
+
410
+ - facial_area (dict): Detected facial area by face detection in dictionary format.
411
+ Contains 'x' and 'y' as the left-corner point, and 'w' and 'h'
412
+ as the width and height. If `detector_backend` is set to 'skip', it represents
413
+ the full image area and is not meaningful.
414
+
415
+ - face_confidence (float): Confidence score of face detection. If `detector_backend` is set
416
+ to 'skip', the confidence will be 0 and is not meaningful.
417
+ """
418
+ return representation.represent(
419
+ img_path=img_path,
420
+ model_name=model_name,
421
+ enforce_detection=enforce_detection,
422
+ detector_backend=detector_backend,
423
+ align=align,
424
+ expand_percentage=expand_percentage,
425
+ normalization=normalization,
426
+ anti_spoofing=anti_spoofing,
427
+ max_faces=max_faces,
428
+ )
429
+
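+ # Editor's illustrative sketch (not part of the uploaded file): extracting
+ # embeddings and reusing them, since verify() also accepts pre-calculated
+ # embeddings as documented above.
+ #
+ #     from deepface import DeepFace
+ #     objs1 = DeepFace.represent(img_path="img1.jpg", model_name="Facenet")
+ #     objs2 = DeepFace.represent(img_path="img2.jpg", model_name="Facenet")
+ #     result = DeepFace.verify(
+ #         img1_path=objs1[0]["embedding"],  # 128 floats for Facenet
+ #         img2_path=objs2[0]["embedding"],
+ #         model_name="Facenet",
+ #     )
+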
430
+
431
+ def stream(
432
+ db_path: str = "",
433
+ model_name: str = "VGG-Face",
434
+ detector_backend: str = "opencv",
435
+ distance_metric: str = "cosine",
436
+ enable_face_analysis: bool = True,
437
+ source: Any = 0,
438
+ time_threshold: int = 5,
439
+ frame_threshold: int = 5,
440
+ anti_spoofing: bool = False,
441
+ ) -> None:
442
+ """
443
+ Run real time face recognition and facial attribute analysis
444
+
445
+ Args:
446
+ db_path (string): Path to the folder containing image files. All detected faces
447
+ in the database will be considered in the decision-making process.
448
+
449
+ model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
450
+ OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
451
+
452
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
453
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
454
+ (default is opencv).
455
+
456
+ distance_metric (string): Metric for measuring similarity. Options: 'cosine',
457
+ 'euclidean', 'euclidean_l2' (default is cosine).
458
+
459
+ enable_face_analysis (bool): Flag to enable face analysis (default is True).
460
+
461
+ source (Any): The source for the video stream (default is 0, which represents the
462
+ default camera).
463
+
464
+ time_threshold (int): The time threshold (in seconds) for face recognition (default is 5).
465
+
466
+ frame_threshold (int): The frame threshold for face recognition (default is 5).
467
+
468
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
469
+ Returns:
470
+ None
471
+ """
472
+
473
+ time_threshold = max(time_threshold, 1)
474
+ frame_threshold = max(frame_threshold, 1)
475
+
476
+ streaming.analysis(
477
+ db_path=db_path,
478
+ model_name=model_name,
479
+ detector_backend=detector_backend,
480
+ distance_metric=distance_metric,
481
+ enable_face_analysis=enable_face_analysis,
482
+ source=source,
483
+ time_threshold=time_threshold,
484
+ frame_threshold=frame_threshold,
485
+ anti_spoofing=anti_spoofing,
486
+ )
487
+
488
+
489
+ def extract_faces(
490
+ img_path: Union[str, np.ndarray],
491
+ detector_backend: str = "opencv",
492
+ enforce_detection: bool = True,
493
+ align: bool = True,
494
+ expand_percentage: int = 0,
495
+ grayscale: bool = False,
496
+ color_face: str = "rgb",
497
+ normalize_face: bool = True,
498
+ anti_spoofing: bool = False,
499
+ ) -> List[Dict[str, Any]]:
500
+ """
501
+ Extract faces from a given image
502
+
503
+ Args:
504
+ img_path (str or np.ndarray): Path to the image. Accepts an exact image path
505
+ as a string, numpy array (BGR), or base64 encoded images.
506
+
507
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
508
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
509
+ (default is opencv).
510
+
511
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
512
+ Set to False to avoid the exception for low-resolution images (default is True).
513
+
514
+ align (bool): Flag to enable face alignment (default is True).
515
+
516
+ expand_percentage (int): expand detected facial area with a percentage (default is 0).
517
+
518
+ grayscale (boolean): (Deprecated) Flag to convert the output face image to grayscale
519
+ (default is False).
520
+
521
+ color_face (string): Color to return face image output. Options: 'rgb', 'bgr' or 'gray'
522
+ (default is 'rgb').
523
+
524
+ normalize_face (boolean): Flag to enable normalization (divide by 255) of the output
525
+ face image (default is True).
526
+
527
+ anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
528
+
529
+ Returns:
530
+ results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary contains:
531
+
532
+ - "face" (np.ndarray): The detected face as a NumPy array.
533
+
534
+ - "facial_area" (Dict[str, Any]): The detected face's regions as a dictionary containing:
535
+ - keys 'x', 'y', 'w', 'h' with int values
536
+ - keys 'left_eye', 'right_eye' with a tuple of 2 ints as values. left and right eyes
537
+ are eyes on the left and right respectively with respect to the person itself
538
+ instead of observer.
539
+
540
+ - "confidence" (float): The confidence score associated with the detected face.
541
+
542
+ - "is_real" (boolean): Result of the anti-spoofing analysis. This key is only
543
+ available in the result if anti_spoofing is set to True in the input arguments.
544
+
545
+ - "antispoof_score" (float): Score of the anti-spoofing analysis. This key is only
546
+ available in the result if anti_spoofing is set to True in the input arguments.
547
+ """
548
+
549
+ return detection.extract_faces(
550
+ img_path=img_path,
551
+ detector_backend=detector_backend,
552
+ enforce_detection=enforce_detection,
553
+ align=align,
554
+ expand_percentage=expand_percentage,
555
+ grayscale=grayscale,
556
+ color_face=color_face,
557
+ normalize_face=normalize_face,
558
+ anti_spoofing=anti_spoofing,
559
+ )
560
+
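+ # Editor's illustrative sketch (not part of the uploaded file): cropping faces
+ # for custom processing; pixels are scaled to [0, 1] while normalize_face=True.
+ #
+ #     from deepface import DeepFace
+ #     faces = DeepFace.extract_faces(img_path="img1.jpg")
+ #     for face in faces:
+ #         print(face["facial_area"], face["confidence"], face["face"].shape)
+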
561
+
562
+ def cli() -> None:
563
+ """
564
+ Command line interface entry point; exposes DeepFace functions through python-fire.
565
+ """
566
+ import fire
567
+
568
+ fire.Fire()
569
+
570
+
571
+ # deprecated function(s)
572
+
573
+
574
+ def detectFace(
575
+ img_path: Union[str, np.ndarray],
576
+ target_size: tuple = (224, 224),
577
+ detector_backend: str = "opencv",
578
+ enforce_detection: bool = True,
579
+ align: bool = True,
580
+ ) -> Union[np.ndarray, None]:
581
+ """
582
+ Deprecated face detection function. Use extract_faces for the same functionality.
583
+
584
+ Args:
585
+ img_path (str or np.ndarray): Path to the image. Accepts an exact image path
586
+ as a string, numpy array (BGR), or base64 encoded images.
587
+
588
+ target_size (tuple): final shape of facial image. black pixels will be
589
+ added to resize the image (default is (224, 224)).
590
+
591
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
592
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
593
+ (default is opencv).
594
+
595
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
596
+ Set to False to avoid the exception for low-resolution images (default is True).
597
+
598
+ align (bool): Flag to enable face alignment (default is True).
599
+
600
+ Returns:
601
+ img (np.ndarray): detected (and aligned) facial area image as numpy array
602
+ """
603
+ logger.warn("Function detectFace is deprecated. Use extract_faces instead.")
604
+ face_objs = extract_faces(
605
+ img_path=img_path,
606
+ detector_backend=detector_backend,
607
+ grayscale=False,
608
+ enforce_detection=enforce_detection,
609
+ align=align,
610
+ )
611
+ extracted_face = None
612
+ if len(face_objs) > 0:
613
+ extracted_face = face_objs[0]["face"]
614
+ extracted_face = preprocessing.resize_image(img=extracted_face, target_size=target_size)
615
+ return extracted_face
deepface/deepface/__init__.py ADDED
@@ -0,0 +1 @@
1
+ __version__ = "0.0.94"
deepface/deepface/api/__init__.py ADDED
File without changes
deepface/deepface/api/postman/deepface-api.postman_collection.json ADDED
@@ -0,0 +1,102 @@
1
+ {
2
+ "info": {
3
+ "_postman_id": "4c0b144e-4294-4bdd-8072-bcb326b1fed2",
4
+ "name": "deepface-api",
5
+ "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
6
+ },
7
+ "item": [
8
+ {
9
+ "name": "Represent",
10
+ "request": {
11
+ "method": "POST",
12
+ "header": [],
13
+ "body": {
14
+ "mode": "raw",
15
+ "raw": "{\n \"model_name\": \"Facenet\",\n \"img\": \"/Users/sefik/Desktop/deepface/tests/dataset/img1.jpg\"\n}",
16
+ "options": {
17
+ "raw": {
18
+ "language": "json"
19
+ }
20
+ }
21
+ },
22
+ "url": {
23
+ "raw": "http://127.0.0.1:5000/represent",
24
+ "protocol": "http",
25
+ "host": [
26
+ "127",
27
+ "0",
28
+ "0",
29
+ "1"
30
+ ],
31
+ "port": "5000",
32
+ "path": [
33
+ "represent"
34
+ ]
35
+ }
36
+ },
37
+ "response": []
38
+ },
39
+ {
40
+ "name": "Face verification",
41
+ "request": {
42
+ "method": "POST",
43
+ "header": [],
44
+ "body": {
45
+ "mode": "raw",
46
+ "raw": " {\n \t\"img1_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/img1.jpg\",\n \"img2_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/img2.jpg\",\n \"model_name\": \"Facenet\",\n \"detector_backend\": \"mtcnn\",\n \"distance_metric\": \"euclidean\"\n }",
47
+ "options": {
48
+ "raw": {
49
+ "language": "json"
50
+ }
51
+ }
52
+ },
53
+ "url": {
54
+ "raw": "http://127.0.0.1:5000/verify",
55
+ "protocol": "http",
56
+ "host": [
57
+ "127",
58
+ "0",
59
+ "0",
60
+ "1"
61
+ ],
62
+ "port": "5000",
63
+ "path": [
64
+ "verify"
65
+ ]
66
+ }
67
+ },
68
+ "response": []
69
+ },
70
+ {
71
+ "name": "Face analysis",
72
+ "request": {
73
+ "method": "POST",
74
+ "header": [],
75
+ "body": {
76
+ "mode": "raw",
77
+ "raw": "{\n \"img_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/couple.jpg\",\n \"actions\": [\"age\", \"gender\", \"emotion\", \"race\"]\n}",
78
+ "options": {
79
+ "raw": {
80
+ "language": "json"
81
+ }
82
+ }
83
+ },
84
+ "url": {
85
+ "raw": "http://127.0.0.1:5000/analyze",
86
+ "protocol": "http",
87
+ "host": [
88
+ "127",
89
+ "0",
90
+ "0",
91
+ "1"
92
+ ],
93
+ "port": "5000",
94
+ "path": [
95
+ "analyze"
96
+ ]
97
+ }
98
+ },
99
+ "response": []
100
+ }
101
+ ]
102
+ }
deepface/deepface/api/src/__init__.py ADDED
File without changes
deepface/deepface/api/src/api.py ADDED
@@ -0,0 +1,9 @@
1
+ import argparse
2
+ import app
3
+
4
+ if __name__ == "__main__":
5
+ deepface_app = app.create_app()
6
+ parser = argparse.ArgumentParser()
7
+ parser.add_argument("-p", "--port", type=int, default=5000, help="Port to serve the API on")
8
+ args = parser.parse_args()
9
+ deepface_app.run(host="0.0.0.0", port=args.port)
deepface/deepface/api/src/app.py ADDED
@@ -0,0 +1,18 @@
1
+ # 3rd party dependencies
2
+ from flask import Flask
3
+ from flask_cors import CORS
4
+
5
+ # project dependencies
6
+ from deepface import DeepFace
7
+ from deepface.api.src.modules.core.routes import blueprint
8
+ from deepface.commons.logger import Logger
9
+
10
+ logger = Logger()
11
+
12
+
13
+ def create_app():
14
+ app = Flask(__name__)
15
+ CORS(app)
16
+ app.register_blueprint(blueprint)
17
+ logger.info(f"Welcome to DeepFace API v{DeepFace.__version__}!")
18
+ return app
deepface/deepface/api/src/modules/__init__.py ADDED
File without changes
deepface/deepface/api/src/modules/core/__init__.py ADDED
File without changes
deepface/deepface/api/src/modules/core/routes.py ADDED
@@ -0,0 +1,96 @@
1
+ from flask import Blueprint, request
2
+ from deepface import DeepFace
3
+ from deepface.api.src.modules.core import service
4
+ from deepface.commons.logger import Logger
5
+
6
+ logger = Logger()
7
+
8
+ blueprint = Blueprint("routes", __name__)
9
+
10
+
11
+ @blueprint.route("/")
12
+ def home():
13
+ return f"<h1>Welcome to DeepFace API v{DeepFace.__version__}!</h1>"
14
+
15
+
16
+ @blueprint.route("/represent", methods=["POST"])
17
+ def represent():
18
+ input_args = request.get_json()
19
+
20
+ if input_args is None:
21
+ return {"message": "empty input set passed"}
22
+
23
+ img_path = input_args.get("img") or input_args.get("img_path")
24
+ if img_path is None:
25
+ return {"message": "you must pass img_path input"}
26
+
27
+ obj = service.represent(
28
+ img_path=img_path,
29
+ model_name=input_args.get("model_name", "VGG-Face"),
30
+ detector_backend=input_args.get("detector_backend", "opencv"),
31
+ enforce_detection=input_args.get("enforce_detection", True),
32
+ align=input_args.get("align", True),
33
+ anti_spoofing=input_args.get("anti_spoofing", False),
34
+ max_faces=input_args.get("max_faces"),
35
+ )
36
+
37
+ logger.debug(obj)
38
+
39
+ return obj
40
+
41
+
42
+ @blueprint.route("/verify", methods=["POST"])
43
+ def verify():
44
+ input_args = request.get_json()
45
+
46
+ if input_args is None:
47
+ return {"message": "empty input set passed"}
48
+
49
+ img1_path = input_args.get("img1") or input_args.get("img1_path")
50
+ img2_path = input_args.get("img2") or input_args.get("img2_path")
51
+
52
+ if img1_path is None:
53
+ return {"message": "you must pass img1_path input"}
54
+
55
+ if img2_path is None:
56
+ return {"message": "you must pass img2_path input"}
57
+
58
+ verification = service.verify(
59
+ img1_path=img1_path,
60
+ img2_path=img2_path,
61
+ model_name=input_args.get("model_name", "VGG-Face"),
62
+ detector_backend=input_args.get("detector_backend", "opencv"),
63
+ distance_metric=input_args.get("distance_metric", "cosine"),
64
+ align=input_args.get("align", True),
65
+ enforce_detection=input_args.get("enforce_detection", True),
66
+ anti_spoofing=input_args.get("anti_spoofing", False),
67
+ )
68
+
69
+ logger.debug(verification)
70
+
71
+ return verification
72
+
73
+
74
+ @blueprint.route("/analyze", methods=["POST"])
75
+ def analyze():
76
+ input_args = request.get_json()
77
+
78
+ if input_args is None:
79
+ return {"message": "empty input set passed"}
80
+
81
+ img_path = input_args.get("img") or input_args.get("img_path")
82
+ if img_path is None:
83
+ return {"message": "you must pass img_path input"}
84
+
85
+ demographies = service.analyze(
86
+ img_path=img_path,
87
+ actions=input_args.get("actions", ["age", "gender", "emotion", "race"]),
88
+ detector_backend=input_args.get("detector_backend", "opencv"),
89
+ enforce_detection=input_args.get("enforce_detection", True),
90
+ align=input_args.get("align", True),
91
+ anti_spoofing=input_args.get("anti_spoofing", False),
92
+ )
93
+
94
+ logger.debug(demographies)
95
+
96
+ return demographies
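+
+ # Editor's illustrative sketch (not part of the uploaded file): exercising the
+ # /verify endpoint, assuming the API serves locally on port 5000 and the image
+ # paths are readable by the server.
+ #
+ #     import requests
+ #     resp = requests.post(
+ #         "http://127.0.0.1:5000/verify",
+ #         json={"img1_path": "img1.jpg", "img2_path": "img2.jpg"},
+ #     )
+ #     print(resp.json())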
deepface/deepface/api/src/modules/core/service.py ADDED
@@ -0,0 +1,88 @@
1
+ # built-in dependencies
2
+ import traceback
3
+ from typing import Optional
4
+
5
+ # project dependencies
6
+ from deepface import DeepFace
7
+
8
+ # pylint: disable=broad-except
9
+
10
+
11
+ def represent(
12
+ img_path: str,
13
+ model_name: str,
14
+ detector_backend: str,
15
+ enforce_detection: bool,
16
+ align: bool,
17
+ anti_spoofing: bool,
18
+ max_faces: Optional[int] = None,
19
+ ):
20
+ try:
21
+ result = {}
22
+ embedding_objs = DeepFace.represent(
23
+ img_path=img_path,
24
+ model_name=model_name,
25
+ detector_backend=detector_backend,
26
+ enforce_detection=enforce_detection,
27
+ align=align,
28
+ anti_spoofing=anti_spoofing,
29
+ max_faces=max_faces,
30
+ )
31
+ result["results"] = embedding_objs
32
+ return result
33
+ except Exception as err:
34
+ tb_str = traceback.format_exc()
35
+ return {"error": f"Exception while representing: {str(err)} - {tb_str}"}, 400
36
+
37
+
38
+ def verify(
39
+ img1_path: str,
40
+ img2_path: str,
41
+ model_name: str,
42
+ detector_backend: str,
43
+ distance_metric: str,
44
+ enforce_detection: bool,
45
+ align: bool,
46
+ anti_spoofing: bool,
47
+ ):
48
+ try:
49
+ obj = DeepFace.verify(
50
+ img1_path=img1_path,
51
+ img2_path=img2_path,
52
+ model_name=model_name,
53
+ detector_backend=detector_backend,
54
+ distance_metric=distance_metric,
55
+ align=align,
56
+ enforce_detection=enforce_detection,
57
+ anti_spoofing=anti_spoofing,
58
+ )
59
+ return obj
60
+ except Exception as err:
61
+ tb_str = traceback.format_exc()
62
+ return {"error": f"Exception while verifying: {str(err)} - {tb_str}"}, 400
63
+
64
+
65
+ def analyze(
66
+ img_path: str,
67
+ actions: list,
68
+ detector_backend: str,
69
+ enforce_detection: bool,
70
+ align: bool,
71
+ anti_spoofing: bool,
72
+ ):
73
+ try:
74
+ result = {}
75
+ demographies = DeepFace.analyze(
76
+ img_path=img_path,
77
+ actions=actions,
78
+ detector_backend=detector_backend,
79
+ enforce_detection=enforce_detection,
80
+ align=align,
81
+ silent=True,
82
+ anti_spoofing=anti_spoofing,
83
+ )
84
+ result["results"] = demographies
85
+ return result
86
+ except Exception as err:
87
+ tb_str = traceback.format_exc()
88
+ return {"error": f"Exception while analyzing: {str(err)} - {tb_str}"}, 400
deepface/deepface/commons/__init__.py ADDED
File without changes
deepface/deepface/commons/constant.py ADDED
@@ -0,0 +1,4 @@
1
+ import os
2
+
3
+ SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
4
+ ROOT_DIR = os.path.dirname(SRC_DIR)
deepface/deepface/commons/folder_utils.py ADDED
@@ -0,0 +1,34 @@
1
+ import os
2
+ from deepface.commons.logger import Logger
3
+
4
+ logger = Logger()
5
+
6
+
7
+ def initialize_folder() -> None:
8
+ """
9
+ Initialize the folder for storing model weights.
10
+
11
+ Raises:
12
+ OSError: if the folder cannot be created.
13
+ """
14
+ home = get_deepface_home()
15
+ deepface_home_path = os.path.join(home, ".deepface")
16
+ weights_path = os.path.join(deepface_home_path, "weights")
17
+
18
+ if not os.path.exists(deepface_home_path):
19
+ os.makedirs(deepface_home_path, exist_ok=True)
20
+ logger.info(f"Directory {deepface_home_path} has been created")
21
+
22
+ if not os.path.exists(weights_path):
23
+ os.makedirs(weights_path, exist_ok=True)
24
+ logger.info(f"Directory {weights_path} has been created")
25
+
26
+
27
+ def get_deepface_home() -> str:
28
+ """
29
+ Get the home directory for storing model weights
30
+
31
+ Returns:
32
+ str: the home directory.
33
+ """
34
+ return str(os.getenv("DEEPFACE_HOME", default=os.path.expanduser("~")))
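+
+ # Editor's illustrative sketch (not part of the uploaded file): weights land in
+ # $DEEPFACE_HOME/.deepface/weights, so the cache can be redirected per process.
+ #
+ #     import os
+ #     os.environ["DEEPFACE_HOME"] = "/tmp/deepface_cache"  # hypothetical path
+ #     from deepface.commons import folder_utils
+ #     folder_utils.initialize_folder()  # creates /tmp/deepface_cache/.deepface/weights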
deepface/deepface/commons/image_utils.py ADDED
@@ -0,0 +1,148 @@
1
+ # built-in dependencies
2
+ import os
3
+ import io
4
+ from typing import List, Union, Tuple
5
+ import hashlib
6
+ import base64
7
+ from pathlib import Path
8
+
9
+ # 3rd party dependencies
10
+ import requests
11
+ import numpy as np
12
+ import cv2
13
+ from PIL import Image
14
+
15
+
16
+ def list_images(path: str) -> List[str]:
17
+ """
18
+ List images in a given path
19
+ Args:
20
+ path (str): path's location
21
+ Returns:
22
+ images (list): list of exact image paths
23
+ """
24
+ images = []
25
+ for r, _, f in os.walk(path):
26
+ for file in f:
27
+ exact_path = os.path.join(r, file)
28
+
29
+ ext_lower = os.path.splitext(exact_path)[-1].lower()
30
+
31
+ if ext_lower not in {".jpg", ".jpeg", ".png"}:
32
+ continue
33
+
34
+ with Image.open(exact_path) as img: # lazy
35
+ if img.format.lower() in {"jpeg", "png"}:
36
+ images.append(exact_path)
37
+ return images
38
+
39
+
40
+ def find_image_hash(file_path: str) -> str:
41
+ """
42
+ Find the hash of the given image file from its file properties;
43
+ hashing the image content itself would be a costly operation
44
+ Args:
45
+ file_path (str): exact image path
46
+ Returns:
47
+ hash (str): digest with sha1 algorithm
48
+ """
49
+ file_stats = os.stat(file_path)
50
+
51
+ # some properties
52
+ file_size = file_stats.st_size
53
+ creation_time = file_stats.st_ctime
54
+ modification_time = file_stats.st_mtime
55
+
56
+ properties = f"{file_size}-{creation_time}-{modification_time}"
57
+
58
+ hasher = hashlib.sha1()
59
+ hasher.update(properties.encode("utf-8"))
60
+ return hasher.hexdigest()
61
+
62
+
63
+ def load_image(img: Union[str, np.ndarray]) -> Tuple[np.ndarray, str]:
64
+ """
65
+ Load image from path, url, base64 or numpy array.
66
+ Args:
67
+ img: a path, url, base64 or numpy array.
68
+ Returns:
69
+ image (numpy array): the loaded image in BGR format
70
+ image name (str): image name itself
71
+ """
72
+
73
+ # The image is already a numpy array
74
+ if isinstance(img, np.ndarray):
75
+ return img, "numpy array"
76
+
77
+ if isinstance(img, Path):
78
+ img = str(img)
79
+
80
+ if not isinstance(img, str):
81
+ raise ValueError(f"img must be numpy array or str but it is {type(img)}")
82
+
83
+ # The image is a base64 string
84
+ if img.startswith("data:image/"):
85
+ return load_image_from_base64(img), "base64 encoded string"
86
+
87
+ # The image is a url
88
+ if img.lower().startswith(("http://", "https://")):
89
+ return load_image_from_web(url=img), img
90
+
91
+ # The image is a path
92
+ if not os.path.isfile(img):
93
+ raise ValueError(f"Confirm that {img} exists")
94
+
95
+ # image must be a file on the system then
96
+
97
+ # image path must contain only English (ASCII) characters
98
+ if not img.isascii():
99
+ raise ValueError(f"Input image must not have non-english characters - {img}")
100
+
101
+ img_obj_bgr = cv2.imread(img)
102
+ # img_obj_rgb = cv2.cvtColor(img_obj_bgr, cv2.COLOR_BGR2RGB)
103
+ return img_obj_bgr, img
104
+
105
+
106
+ def load_image_from_base64(uri: str) -> np.ndarray:
107
+ """
108
+ Load image from base64 string.
109
+ Args:
110
+ uri: a base64 string.
111
+ Returns:
112
+ numpy array: the loaded image.
113
+ """
114
+
115
+ encoded_data_parts = uri.split(",")
116
+
117
+ if len(encoded_data_parts) < 2:
118
+ raise ValueError("format error in base64 encoded string")
119
+
120
+ encoded_data = encoded_data_parts[1]
121
+ decoded_bytes = base64.b64decode(encoded_data)
122
+
123
+ # similar to find functionality, we are just considering these extensions
124
+ # content type is a safer option than file extension
125
+ with Image.open(io.BytesIO(decoded_bytes)) as img:
126
+ file_type = img.format.lower()
127
+ if file_type not in {"jpeg", "png"}:
128
+ raise ValueError(f"Input image can be jpg or png, but it is {file_type}")
129
+
130
+ nparr = np.frombuffer(decoded_bytes, np.uint8)  # np.fromstring is deprecated for binary data
131
+ img_bgr = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
132
+ # img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
133
+ return img_bgr
134
+
135
+
136
+ def load_image_from_web(url: str) -> np.ndarray:
137
+ """
138
+ Loading an image from web
139
+ Args:
140
+ url: link for the image
141
+ Returns:
142
+ img (np.ndarray): equivalent to pre-loaded image from opencv (BGR format)
143
+ """
144
+ response = requests.get(url, stream=True, timeout=60)
145
+ response.raise_for_status()
146
+ image_array = np.asarray(bytearray(response.raw.read()), dtype=np.uint8)
147
+ img = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
148
+ return img
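+
+ # Editor's illustrative sketch (not part of the uploaded file): load_image
+ # dispatches on the input type - numpy passthrough, "data:image/..." base64,
+ # http(s) url, or a plain file path read with cv2.imread; every path yields BGR.
+ #
+ #     from deepface.commons import image_utils
+ #     img, name = image_utils.load_image("tests/dataset/img1.jpg")  # hypothetical path
+ #     print(img.shape, name)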
deepface/deepface/commons/logger.py ADDED
@@ -0,0 +1,57 @@
1
+ import os
2
+ import logging
3
+ from datetime import datetime
4
+
5
+ # pylint: disable=broad-except
6
+ class Logger:
7
+ """
8
+ A Logger class for logging messages with a specific log level.
9
+
10
+ The class follows the singleton design pattern, ensuring that only one
11
+ instance of the Logger is created. The parameters of the first instance
12
+ are preserved across all instances.
13
+ """
14
+
15
+ __instance = None
16
+
17
+ def __new__(cls):
18
+ if cls.__instance is None:
19
+ cls.__instance = super(Logger, cls).__new__(cls)
20
+ return cls.__instance
21
+
22
+ def __init__(self):
23
+ if not hasattr(self, "_singleton_initialized"):
24
+ self._singleton_initialized = True # to prevent multiple initializations
25
+ log_level = os.environ.get("DEEPFACE_LOG_LEVEL", str(logging.INFO))
26
+ try:
27
+ self.log_level = int(log_level)
28
+ except Exception as err:
29
+ self.dump_log(
30
+ f"Exception while parsing $DEEPFACE_LOG_LEVEL. "
31
+ f"Expected int but it is {log_level} ({str(err)}). "
32
+ "Setting app log level to info."
33
+ )
34
+ self.log_level = logging.INFO
35
+
36
+ def info(self, message):
37
+ if self.log_level <= logging.INFO:
38
+ self.dump_log(f"{message}")
39
+
40
+ def debug(self, message):
41
+ if self.log_level <= logging.DEBUG:
42
+ self.dump_log(f"🕷️ {message}")
43
+
44
+ def warn(self, message):
45
+ if self.log_level <= logging.WARNING:
46
+ self.dump_log(f"⚠️ {message}")
47
+
48
+ def error(self, message):
49
+ if self.log_level <= logging.ERROR:
50
+ self.dump_log(f"🔴 {message}")
51
+
52
+ def critical(self, message):
53
+ if self.log_level <= logging.CRITICAL:
54
+ self.dump_log(f"💥 {message}")
55
+
56
+ def dump_log(self, message):
57
+ print(f"{str(datetime.now())[2:-7]} - {message}")
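+
+ # Editor's illustrative sketch (not part of the uploaded file): because of the
+ # singleton, DEEPFACE_LOG_LEVEL (an int such as logging.DEBUG == 10) must be
+ # exported before the first Logger() is constructed anywhere in the process.
+ #
+ #     import os, logging
+ #     os.environ["DEEPFACE_LOG_LEVEL"] = str(logging.DEBUG)
+ #     from deepface.commons.logger import Logger
+ #     assert Logger() is Logger()  # one shared instance
+ #     Logger().debug("visible because the level is DEBUG")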
deepface/deepface/commons/package_utils.py ADDED
@@ -0,0 +1,65 @@
1
+ # built-in dependencies
2
+ import hashlib
3
+
4
+ # 3rd party dependencies
5
+ import tensorflow as tf
6
+
7
+ # package dependencies
8
+ from deepface.commons.logger import Logger
9
+
10
+ logger = Logger()
11
+
12
+
13
+ def get_tf_major_version() -> int:
14
+ """
15
+ Find tensorflow's major version
16
+ Returns
17
+ major_version (int)
18
+ """
19
+ return int(tf.__version__.split(".", maxsplit=1)[0])
20
+
21
+
22
+ def get_tf_minor_version() -> int:
23
+ """
24
+ Find tensorflow's minor version
25
+ Returns
26
+ minor_version (int)
27
+ """
28
+ return int(tf.__version__.split(".", maxsplit=-1)[1])
29
+
30
+
31
+ def validate_for_keras3():
32
+ tf_major = get_tf_major_version()
33
+ tf_minor = get_tf_minor_version()
34
+
35
+ # tf_keras is a must dependency after tf 2.16
36
+ if tf_major == 1 or (tf_major == 2 and tf_minor < 16):
37
+ return
38
+
39
+ try:
40
+ import tf_keras
41
+
42
+ logger.debug(f"tf_keras is already available - {tf_keras.__version__}")
43
+ except ImportError as err:
44
+ # one may consider installing that package here
45
+ raise ValueError(
46
+ f"You have tensorflow {tf.__version__} and this requires "
47
+ "tf-keras package. Please run `pip install tf-keras` "
48
+ "or downgrade your tensorflow."
49
+ ) from err
50
+
51
+
52
+ def find_file_hash(file_path: str, hash_algorithm: str = "sha256") -> str:
53
+ """
54
+ Find the hash of a given file with its content
55
+ Args:
56
+ file_path (str): exact path of a given file
57
+ hash_algorithm (str): hash algorithm
58
+ Returns:
59
+ hash (str)
60
+ """
61
+ hash_func = hashlib.new(hash_algorithm)
62
+ with open(file_path, "rb") as f:
63
+ while chunk := f.read(8192):
64
+ hash_func.update(chunk)
65
+ return hash_func.hexdigest()
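+
+ # Editor's illustrative sketch (not part of the uploaded file): the 8 KB
+ # chunked read keeps memory usage flat even for large weight files.
+ #
+ #     from deepface.commons import package_utils
+ #     digest = package_utils.find_file_hash("weights.h5")  # hypothetical file
+ #     print(digest)  # hex sha256 digest by default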
deepface/deepface/commons/weight_utils.py ADDED
@@ -0,0 +1,92 @@
1
+ # built-in dependencies
2
+ import os
3
+ from typing import Optional
4
+ import zipfile
5
+ import bz2
6
+
7
+ # 3rd party dependencies
8
+ import gdown
9
+
10
+ # project dependencies
11
+ from deepface.commons import folder_utils, package_utils
12
+ from deepface.commons.logger import Logger
13
+
14
+ tf_version = package_utils.get_tf_major_version()
15
+ if tf_version == 1:
16
+ from keras.models import Sequential
17
+ else:
18
+ from tensorflow.keras.models import Sequential
19
+
20
+ logger = Logger()
21
+
22
+
23
+ def download_weights_if_necessary(
24
+ file_name: str, source_url: str, compress_type: Optional[str] = None
25
+ ) -> str:
26
+ """
27
+ Download the weights of a pre-trained model from external source if not downloaded yet.
28
+ Args:
29
+ file_name (str): target file name with extension
30
+ source_url (url): source url to be downloaded
31
+ compress_type (optional str): compress type e.g. zip or bz2
32
+ Returns
33
+ target_file (str): exact path for the target file
34
+ """
35
+ home = folder_utils.get_deepface_home()
36
+
37
+ target_file = os.path.join(home, ".deepface/weights", file_name)
38
+
39
+ if os.path.isfile(target_file):
40
+ logger.debug(f"{file_name} is already available at {target_file}")
41
+ return target_file
42
+
43
+ try:
44
+ logger.info(f"🔗 {file_name} will be downloaded from {source_url} to {target_file}...")
45
+
46
+ if compress_type is None:
47
+ gdown.download(source_url, target_file, quiet=False)
48
+ else:
49
+ gdown.download(source_url, f"{target_file}.{compress_type}", quiet=False)
50
+
51
+ except Exception as err:
52
+ raise ValueError(
53
+ f"⛓️‍💥 An exception occurred while downloading {file_name} from {source_url}. "
54
+ f"Consider downloading it manually to {target_file}."
55
+ ) from err
56
+
57
+ # uncompress downloaded file
58
+ if compress_type == "zip":
59
+ with zipfile.ZipFile(f"{target_file}.zip", "r") as zip_ref:
60
+ zip_ref.extractall(os.path.join(home, ".deepface/weights"))
61
+ logger.info(f"{target_file}.zip unzipped")
62
+ elif compress_type == "bz2":
63
+ bz2file = bz2.BZ2File(f"{target_file}.bz2")
64
+ data = bz2file.read()
65
+ with open(target_file, "wb") as f:
66
+ f.write(data)
67
+ logger.info(f"{target_file}.bz2 unzipped")
68
+
69
+ return target_file
70
+
71
+
72
+ def load_model_weights(model: Sequential, weight_file: str) -> Sequential:
73
+ """
74
+ Load pre-trained weights for a given model
75
+ Args:
76
+ model (keras.models.Sequential): pre-built model
77
+ weight_file (str): exact path of pre-trained weights
78
+ Returns:
79
+ model (keras.models.Sequential): pre-built model with
80
+ updated weights
81
+ """
82
+ try:
83
+ model.load_weights(weight_file)
84
+ except Exception as err:
85
+ raise ValueError(
86
+ f"An exception occurred while loading the pre-trained weights from {weight_file}. "
87
+ "This might have happened due to an interruption during the download. "
88
+ "You may want to delete it and allow DeepFace to download it again during the next run. "
89
+ "If the issue persists, consider downloading the file directly from the source "
90
+ "and copying it to the target folder."
91
+ ) from err
92
+ return model
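+
+ # Editor's illustrative sketch (not part of the uploaded file): fetching a
+ # weight file once; repeated calls return the cached copy immediately.
+ #
+ #     from deepface.commons import weight_utils
+ #     path = weight_utils.download_weights_if_necessary(
+ #         file_name="age_model_weights.h5",
+ #         source_url="https://github.com/serengil/deepface_models/releases/download/v1.0/age_model_weights.h5",
+ #     )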
deepface/deepface/models/Demography.py ADDED
@@ -0,0 +1,22 @@
1
+ from typing import Union
2
+ from abc import ABC, abstractmethod
3
+ import numpy as np
4
+ from deepface.commons import package_utils
5
+
6
+ tf_version = package_utils.get_tf_major_version()
7
+ if tf_version == 1:
8
+ from keras.models import Model
9
+ else:
10
+ from tensorflow.keras.models import Model
11
+
12
+ # Notice that all facial attribute analysis models must be inherited from this class
13
+
14
+
15
+ # pylint: disable=too-few-public-methods
16
+ class Demography(ABC):
17
+ model: Model
18
+ model_name: str
19
+
20
+ @abstractmethod
21
+ def predict(self, img: np.ndarray) -> Union[np.ndarray, np.float64]:
22
+ pass
deepface/deepface/models/Detector.py ADDED
@@ -0,0 +1,69 @@
1
+ from typing import List, Tuple, Optional
2
+ from abc import ABC, abstractmethod
3
+ from dataclasses import dataclass
4
+ import numpy as np
5
+
6
+ # Notice that all facial detector models must be inherited from this class
7
+
8
+
9
+ # pylint: disable=unnecessary-pass, too-few-public-methods
10
+ class Detector(ABC):
11
+ @abstractmethod
12
+ def detect_faces(self, img: np.ndarray) -> List["FacialAreaRegion"]:
13
+ """
14
+ Interface for face detection and alignment
15
+
16
+ Args:
17
+ img (np.ndarray): pre-loaded image as numpy array
18
+
19
+ Returns:
20
+ results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
21
+ where each object contains:
22
+
23
+ - facial_area (FacialAreaRegion): The facial area region represented
24
+ as x, y, w, h, left_eye and right_eye. left eye and right eye are
25
+ eyes on the left and right respectively with respect to the person
26
+ instead of observer.
27
+ """
28
+ pass
29
+
30
+
31
+ @dataclass
32
+ class FacialAreaRegion:
33
+ """
34
+ Initialize a Face object.
35
+
36
+ Args:
37
+ x (int): The x-coordinate of the top-left corner of the bounding box.
38
+ y (int): The y-coordinate of the top-left corner of the bounding box.
39
+ w (int): The width of the bounding box.
40
+ h (int): The height of the bounding box.
41
+ left_eye (tuple): The coordinates (x, y) of the left eye with respect to
42
+ the person instead of observer. Default is None.
43
+ right_eye (tuple): The coordinates (x, y) of the right eye with respect to
44
+ the person instead of observer. Default is None.
45
+ confidence (float, optional): Confidence score associated with the face detection.
46
+ Default is None.
47
+ """
48
+ x: int
49
+ y: int
50
+ w: int
51
+ h: int
52
+ left_eye: Optional[Tuple[int, int]] = None
53
+ right_eye: Optional[Tuple[int, int]] = None
54
+ confidence: Optional[float] = None
55
+
56
+
57
+ @dataclass
58
+ class DetectedFace:
59
+ """
60
+ Initialize detected face object.
61
+
62
+ Args:
63
+ img (np.ndarray): detected face image as numpy array
64
+ facial_area (FacialAreaRegion): detected face's metadata (e.g. bounding box)
65
+ confidence (float): confidence score for face detection
66
+ """
67
+ img: np.ndarray
68
+ facial_area: FacialAreaRegion
69
+ confidence: float
deepface/deepface/models/FacialRecognition.py ADDED
@@ -0,0 +1,29 @@
1
+ from abc import ABC
2
+ from typing import Any, Union, List, Tuple
3
+ import numpy as np
4
+ from deepface.commons import package_utils
5
+
6
+ tf_version = package_utils.get_tf_major_version()
7
+ if tf_version == 2:
8
+ from tensorflow.keras.models import Model
9
+ else:
10
+ from keras.models import Model
11
+
12
+ # Notice that all facial recognition models must be inherited from this class
13
+
14
+ # pylint: disable=too-few-public-methods
15
+ class FacialRecognition(ABC):
16
+ model: Union[Model, Any]
17
+ model_name: str
18
+ input_shape: Tuple[int, int]
19
+ output_shape: int
20
+
21
+ def forward(self, img: np.ndarray) -> List[float]:
22
+ if not isinstance(self.model, Model):
23
+ raise ValueError(
24
+ "You must overwrite the forward method when the model is not a keras model, "
25
+ f"but {self.model_name} has not overwritten it!"
26
+ )
27
+ # model.predict causes memory issue when it is called in a for loop
28
+ # embedding = model.predict(img, verbose=0)[0].tolist()
29
+ return self.model(img, training=False).numpy()[0].tolist()
deepface/deepface/models/__init__.py ADDED
File without changes
deepface/deepface/models/demography/Age.py ADDED
@@ -0,0 +1,89 @@
1
+ # 3rd party dependencies
2
+ import numpy as np
3
+
4
+ # project dependencies
5
+ from deepface.models.facial_recognition import VGGFace
6
+ from deepface.commons import package_utils, weight_utils
7
+ from deepface.models.Demography import Demography
8
+ from deepface.commons.logger import Logger
9
+
10
+ logger = Logger()
11
+
12
+ # ----------------------------------------
13
+ # dependency configurations
14
+
15
+ tf_version = package_utils.get_tf_major_version()
16
+
17
+ if tf_version == 1:
18
+ from keras.models import Model, Sequential
19
+ from keras.layers import Convolution2D, Flatten, Activation
20
+ else:
21
+ from tensorflow.keras.models import Model, Sequential
22
+ from tensorflow.keras.layers import Convolution2D, Flatten, Activation
23
+
24
+ # ----------------------------------------
25
+
26
+ # pylint: disable=too-few-public-methods
27
+ class ApparentAgeClient(Demography):
28
+ """
29
+ Age model class
30
+ """
31
+
32
+ def __init__(self):
33
+ self.model = load_model()
34
+ self.model_name = "Age"
35
+
36
+ def predict(self, img: np.ndarray) -> np.float64:
37
+ # model.predict causes memory issue when it is called in a for loop
38
+ # age_predictions = self.model.predict(img, verbose=0)[0, :]
39
+ age_predictions = self.model(img, training=False).numpy()[0, :]
40
+ return find_apparent_age(age_predictions)
41
+
42
+
43
+ def load_model(
44
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/age_model_weights.h5",
45
+ ) -> Model:
46
+ """
47
+ Construct age model, download its weights and load
48
+ Returns:
49
+ model (Model)
50
+ """
51
+
52
+ model = VGGFace.base_model()
53
+
54
+ # --------------------------
55
+
56
+ classes = 101
58
+ base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
59
+ base_model_output = Flatten()(base_model_output)
60
+ base_model_output = Activation("softmax")(base_model_output)
61
+
62
+ # --------------------------
63
+
64
+ age_model = Model(inputs=model.input, outputs=base_model_output)
65
+
66
+ # --------------------------
67
+
68
+ # load weights
69
+ weight_file = weight_utils.download_weights_if_necessary(
70
+ file_name="age_model_weights.h5", source_url=url
71
+ )
72
+
73
+ age_model = weight_utils.load_model_weights(
74
+ model=age_model, weight_file=weight_file
75
+ )
76
+
77
+ return age_model
78
+
79
+ def find_apparent_age(age_predictions: np.ndarray) -> np.float64:
80
+ """
81
+ Find the apparent age from a probability distribution over age classes
82
+ Args:
83
+ age_predictions (np.ndarray): probability of each age class, shape (101,)
84
+ Returns:
85
+ apparent_age (float)
86
+ """
87
+ output_indexes = np.arange(0, 101)
88
+ apparent_age = np.sum(age_predictions * output_indexes)
89
+ return apparent_age
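+
+ # Editor's illustrative sketch (not part of the uploaded file): the apparent
+ # age is the expected value over the 101 age classes, e.g. probabilities of
+ # 0.5 at age 30 and 0.5 at age 40 yield 35.0.
+ #
+ #     import numpy as np
+ #     probas = np.zeros(101)
+ #     probas[30] = probas[40] = 0.5
+ #     assert find_apparent_age(probas) == 35.0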
deepface/deepface/models/demography/Emotion.py ADDED
@@ -0,0 +1,103 @@
1
+ # 3rd party dependencies
2
+ import numpy as np
3
+ import cv2
4
+
5
+ # project dependencies
6
+ from deepface.commons import package_utils, weight_utils
7
+ from deepface.models.Demography import Demography
8
+ from deepface.commons.logger import Logger
9
+
10
+ logger = Logger()
11
+
12
+ # -------------------------------------------
13
+ # pylint: disable=line-too-long
14
+ # -------------------------------------------
15
+ # dependency configuration
16
+ tf_version = package_utils.get_tf_major_version()
17
+
18
+ if tf_version == 1:
19
+ from keras.models import Sequential
20
+ from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout
21
+ else:
22
+ from tensorflow.keras.models import Sequential
23
+ from tensorflow.keras.layers import (
24
+ Conv2D,
25
+ MaxPooling2D,
26
+ AveragePooling2D,
27
+ Flatten,
28
+ Dense,
29
+ Dropout,
30
+ )
31
+ # -------------------------------------------
32
+
33
+ # Labels for the emotions that can be detected by the model.
34
+ labels = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]
35
+
36
+ # pylint: disable=too-few-public-methods
37
+ class EmotionClient(Demography):
38
+ """
39
+ Emotion model class
40
+ """
41
+
42
+ def __init__(self):
43
+ self.model = load_model()
44
+ self.model_name = "Emotion"
45
+
46
+ def predict(self, img: np.ndarray) -> np.ndarray:
47
+ img_gray = cv2.cvtColor(img[0], cv2.COLOR_BGR2GRAY)
48
+ img_gray = cv2.resize(img_gray, (48, 48))
49
+ img_gray = np.expand_dims(img_gray, axis=0)
50
+
51
+ # model.predict causes memory issue when it is called in a for loop
52
+ # emotion_predictions = self.model.predict(img_gray, verbose=0)[0, :]
53
+ emotion_predictions = self.model(img_gray, training=False).numpy()[0, :]
54
+
55
+ return emotion_predictions
56
+
57
+
58
+ def load_model(
59
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/facial_expression_model_weights.h5",
60
+ ) -> Sequential:
61
+ """
62
+ Construct the emotion model, download and load its weights
63
+ """
64
+
65
+ num_classes = 7
66
+
67
+ model = Sequential()
68
+
69
+ # 1st convolution layer
70
+ model.add(Conv2D(64, (5, 5), activation="relu", input_shape=(48, 48, 1)))
71
+ model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))
72
+
73
+ # 2nd convolution layer
74
+ model.add(Conv2D(64, (3, 3), activation="relu"))
75
+ model.add(Conv2D(64, (3, 3), activation="relu"))
76
+ model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
77
+
78
+ # 3rd convolution layer
79
+ model.add(Conv2D(128, (3, 3), activation="relu"))
80
+ model.add(Conv2D(128, (3, 3), activation="relu"))
81
+ model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
82
+
83
+ model.add(Flatten())
84
+
85
+ # fully connected neural networks
86
+ model.add(Dense(1024, activation="relu"))
87
+ model.add(Dropout(0.2))
88
+ model.add(Dense(1024, activation="relu"))
89
+ model.add(Dropout(0.2))
90
+
91
+ model.add(Dense(num_classes, activation="softmax"))
92
+
93
+ # ----------------------------
94
+
95
+ weight_file = weight_utils.download_weights_if_necessary(
96
+ file_name="facial_expression_model_weights.h5", source_url=url
97
+ )
98
+
99
+ model = weight_utils.load_model_weights(
100
+ model=model, weight_file=weight_file
101
+ )
102
+
103
+ return model
deepface/deepface/models/demography/Gender.py ADDED
@@ -0,0 +1,79 @@
1
+ # 3rd party dependencies
2
+ import numpy as np
3
+
4
+ # project dependencies
5
+ from deepface.models.facial_recognition import VGGFace
6
+ from deepface.commons import package_utils, weight_utils
7
+ from deepface.models.Demography import Demography
8
+ from deepface.commons.logger import Logger
9
+
10
+ logger = Logger()
11
+
12
+ # -------------------------------------
13
+ # pylint: disable=line-too-long
14
+ # -------------------------------------
15
+ # dependency configurations
16
+
17
+ tf_version = package_utils.get_tf_major_version()
18
+ if tf_version == 1:
19
+ from keras.models import Model, Sequential
20
+ from keras.layers import Convolution2D, Flatten, Activation
21
+ else:
22
+ from tensorflow.keras.models import Model, Sequential
23
+ from tensorflow.keras.layers import Convolution2D, Flatten, Activation
24
+ # -------------------------------------
25
+
26
+ # Labels for the genders that can be detected by the model.
27
+ labels = ["Woman", "Man"]
28
+
29
+ # pylint: disable=too-few-public-methods
30
+ class GenderClient(Demography):
31
+ """
32
+ Gender model class
33
+ """
34
+
35
+ def __init__(self):
36
+ self.model = load_model()
37
+ self.model_name = "Gender"
38
+
39
+ def predict(self, img: np.ndarray) -> np.ndarray:
40
+ # model.predict causes memory issue when it is called in a for loop
41
+ # return self.model.predict(img, verbose=0)[0, :]
42
+ return self.model(img, training=False).numpy()[0, :]
43
+
44
+
45
+ def load_model(
46
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/gender_model_weights.h5",
47
+ ) -> Model:
48
+ """
49
+ Construct gender model, download its weights and load
50
+ Returns:
51
+ model (Model)
52
+ """
53
+
54
+ model = VGGFace.base_model()
55
+
56
+ # --------------------------
57
+
58
+ classes = 2
60
+ base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
61
+ base_model_output = Flatten()(base_model_output)
62
+ base_model_output = Activation("softmax")(base_model_output)
63
+
64
+ # --------------------------
65
+
66
+ gender_model = Model(inputs=model.input, outputs=base_model_output)
67
+
68
+ # --------------------------
69
+
70
+ # load weights
71
+ weight_file = weight_utils.download_weights_if_necessary(
72
+ file_name="gender_model_weights.h5", source_url=url
73
+ )
74
+
75
+ gender_model = weight_utils.load_model_weights(
76
+ model=gender_model, weight_file=weight_file
77
+ )
78
+
79
+ return gender_model
deepface/deepface/models/demography/Race.py ADDED
@@ -0,0 +1,76 @@
1
+ # 3rd party dependencies
2
+ import numpy as np
3
+
4
+ # project dependencies
5
+ from deepface.models.facial_recognition import VGGFace
6
+ from deepface.commons import package_utils, weight_utils
7
+ from deepface.models.Demography import Demography
8
+ from deepface.commons.logger import Logger
9
+
10
+ logger = Logger()
11
+
12
+ # --------------------------
13
+ # pylint: disable=line-too-long
14
+ # --------------------------
15
+ # dependency configurations
16
+ tf_version = package_utils.get_tf_major_version()
17
+
18
+ if tf_version == 1:
19
+ from keras.models import Model, Sequential
20
+ from keras.layers import Convolution2D, Flatten, Activation
21
+ else:
22
+ from tensorflow.keras.models import Model, Sequential
23
+ from tensorflow.keras.layers import Convolution2D, Flatten, Activation
24
+ # --------------------------
25
+ # Labels for the ethnic phenotypes that can be detected by the model.
26
+ labels = ["asian", "indian", "black", "white", "middle eastern", "latino hispanic"]
27
+
28
+ # pylint: disable=too-few-public-methods
29
+ class RaceClient(Demography):
30
+ """
31
+ Race model class
32
+ """
33
+
34
+ def __init__(self):
35
+ self.model = load_model()
36
+ self.model_name = "Race"
37
+
38
+ def predict(self, img: np.ndarray) -> np.ndarray:
39
+ # model.predict causes memory issue when it is called in a for loop
40
+ # return self.model.predict(img, verbose=0)[0, :]
41
+ return self.model(img, training=False).numpy()[0, :]
42
+
43
+
44
+ def load_model(
45
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/race_model_single_batch.h5",
46
+ ) -> Model:
47
+ """
48
+ Construct race model, download its weights and load
49
+ """
50
+
51
+ model = VGGFace.base_model()
52
+
53
+ # --------------------------
54
+
55
+ classes = 6
57
+ base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
58
+ base_model_output = Flatten()(base_model_output)
59
+ base_model_output = Activation("softmax")(base_model_output)
60
+
61
+ # --------------------------
62
+
63
+ race_model = Model(inputs=model.input, outputs=base_model_output)
64
+
65
+ # --------------------------
66
+
67
+ # load weights
68
+ weight_file = weight_utils.download_weights_if_necessary(
69
+ file_name="race_model_single_batch.h5", source_url=url
70
+ )
71
+
72
+ race_model = weight_utils.load_model_weights(
73
+ model=race_model, weight_file=weight_file
74
+ )
75
+
76
+ return race_model
deepface/deepface/models/demography/__init__.py ADDED
File without changes