import os
import random
import shutil
from concurrent.futures import ThreadPoolExecutor
# Define paths
dataset_folder = 'path/to/dataset'
train_folder = os.path.join(dataset_folder, 'train')
val_folder = os.path.join(dataset_folder, 'validation')
# Create validation folder if it doesn't exist
os.makedirs(val_folder, exist_ok=True)
# Get all label folders inside train folder
label_folders = [f for f in os.listdir(train_folder) if os.path.isdir(os.path.join(train_folder, f))]
# Function to move images from a specific label folder
def process_label_folder(label_folder, num_threads):
    train_label_folder = os.path.join(train_folder, label_folder)
    val_label_folder = os.path.join(val_folder, label_folder)

    # Create corresponding validation label folder
    os.makedirs(val_label_folder, exist_ok=True)

    # Get all images in the train/label_folder
    all_images = os.listdir(train_label_folder)
    total_images = len(all_images)

    # Calculate 20% of images for validation
    val_size = int(total_images * 0.2)

    # Randomly select 20% of the images for validation
    val_images = random.sample(all_images, val_size)

    # Function to move a single image
    def move_image(image):
        src = os.path.join(train_label_folder, image)
        dest = os.path.join(val_label_folder, image)
        shutil.move(src, dest)

    # Use ThreadPoolExecutor to move images in parallel
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        executor.map(move_image, val_images)

    print(f"Moved {val_size} images from {label_folder} to validation folder.")
# Main function to get user input for number of threads and process folders
def main():
    # Ask user for the number of threads
    num_threads = int(input("Enter the number of threads to use: "))

    # Process each label folder using the input number of threads
    for label_folder in label_folders:
        process_label_folder(label_folder, num_threads)

    print("Validation dataset created.")

if __name__ == "__main__":
    main()
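# Sanity check (a minimal sketch, not part of the original script): after the move,
# each label folder should hold roughly an 80/20 train/validation ratio. Assumes the
# same dataset_folder layout as above; report_split is a hypothetical helper name.
def report_split(dataset_folder):
    train_dir = os.path.join(dataset_folder, 'train')
    val_dir = os.path.join(dataset_folder, 'validation')
    for label in sorted(os.listdir(train_dir)):
        n_train = len(os.listdir(os.path.join(train_dir, label)))
        n_val = len(os.listdir(os.path.join(val_dir, label)))
        total = n_train + n_val
        ratio = n_val / total if total else 0.0
        print(f"{label}: train={n_train}, val={n_val}, val ratio={ratio:.2%}")

# Example usage: report_split(dataset_folder)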
import numpy as np
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, accuracy_score
# Assuming you have true labels and predicted labels
y_true = [0, 1, 2, 1, 0, 1, 2, 2, 0] # Replace with your true labels
y_pred = [0, 0, 2, 1, 0, 1, 2, 1, 0] # Replace with your predicted labels
# Calculate the confusion matrix
conf_matrix = confusion_matrix(y_true, y_pred)
# Print the confusion matrix
print("Confusion Matrix:")
print(conf_matrix)
# Calculate precision, recall, f1-score, and accuracy for each label
precision = precision_score(y_true, y_pred, average=None)
recall = recall_score(y_true, y_pred, average=None)
f1 = f1_score(y_true, y_pred, average=None)
accuracy = accuracy_score(y_true, y_pred)
# Print precision, recall, f1-score for each label
for i in range(len(precision)):
    print(f"Label {i}:")
    print(f"  Precision: {precision[i]:.4f}")
    print(f"  Recall: {recall[i]:.4f}")
    print(f"  F1-Score: {f1[i]:.4f}")
    print()
# Print overall accuracy
print(f"Overall Accuracy: {accuracy:.4f}")
import numpy as np
from sklearn.metrics import confusion_matrix
# Example true and predicted labels
y_true = [0, 1, 2, 1, 0, 1, 2, 2, 0] # Replace with your true labels
y_pred = [0, 0, 2, 1, 0, 1, 2, 1, 0] # Replace with your predicted labels
# Class names (replace with your actual labels)
label_names = ['Class A', 'Class B', 'Class C']
# Calculate the confusion matrix
conf_matrix = confusion_matrix(y_true, y_pred)
# Print the confusion matrix
print("Confusion Matrix:")
print(conf_matrix)
# Number of classes
num_classes = conf_matrix.shape[0]
# Initialize lists for precision, recall, and f1-score
# (named f1_scores so it does not shadow sklearn's f1_score function)
precision = []
recall = []
f1_scores = []

# Calculate precision, recall, and F1-score from confusion matrix for each class
for i in range(num_classes):
    tp = conf_matrix[i, i]               # True Positives
    fp = np.sum(conf_matrix[:, i]) - tp  # False Positives
    fn = np.sum(conf_matrix[i, :]) - tp  # False Negatives

    # Calculate precision, recall, f1-score, guarding against division by zero
    precision_i = tp / (tp + fp) if (tp + fp) > 0 else 0
    recall_i = tp / (tp + fn) if (tp + fn) > 0 else 0
    f1_i = 2 * (precision_i * recall_i) / (precision_i + recall_i) if (precision_i + recall_i) > 0 else 0

    # Append to lists
    precision.append(precision_i)
    recall.append(recall_i)
    f1_scores.append(f1_i)
# Print precision, recall, f1-score for each label
for i, label in enumerate(label_names):
    print(f"{label}:")
    print(f"  Precision: {precision[i]:.4f}")
    print(f"  Recall: {recall[i]:.4f}")
    print(f"  F1-Score: {f1_scores[i]:.4f}")
    print()
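# Cross-check (a small sketch): the hand-computed values should agree with
# sklearn's precision_recall_fscore_support on the same labels.
from sklearn.metrics import precision_recall_fscore_support
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, zero_division=0)
assert np.allclose(precision, p) and np.allclose(recall, r) and np.allclose(f1_scores, f)
print("Manual metrics match sklearn.")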
import React, { useState, useEffect } from "react";
import * as tflite from "@tensorflow/tfjs-tflite";
import * as tf from "@tensorflow/tfjs";
function ObjectDetector() {
  const [model, setModel] = useState(null);
  const [imageUrl, setImageUrl] = useState(null);
  const [predictions, setPredictions] = useState([]);

  // Load the TFLite model
  useEffect(() => {
    const loadModel = async () => {
      const loadedModel = await tflite.loadTFLiteModel('/path_to_your_model.tflite');
      setModel(loadedModel);
    };
    loadModel();
  }, []);

  // Handle image input change
  const handleImageChange = (event) => {
    const file = event.target.files[0];
    if (file) {
      setImageUrl(URL.createObjectURL(file));
    }
  };

  // Run inference on the selected image
  const runInference = async () => {
    if (!model || !imageUrl) return;

    const imageElement = document.getElementById("inputImage");

    // Load the image into a tensor
    const inputTensor = preprocessImage(imageElement, [1, 320, 320, 3]); // Adjust this size based on your model's expected input

    // Run inference
    const output = await model.predict(inputTensor);

    // Extract predictions
    const [boxes, classes, scores, numDetections] = extractPredictions(output);

    // Collect the predictions
    const predictionResults = [];
    for (let i = 0; i < numDetections; i++) {
      if (scores[i] > 0.5) { // Only keep results with confidence > 0.5
        predictionResults.push({
          class: classes[i], // Map class ID to label if available
          score: scores[i],
          bbox: boxes[i],
        });
      }
    }
    setPredictions(predictionResults);

    // Clean up input and output tensors to free memory
    tf.dispose([inputTensor, output]);
  };
  // Function to preprocess image (resize, normalize, and convert to tensor)
  const preprocessImage = (image, inputShape) => {
    const tensor = tf.browser.fromPixels(image)         // Load image into a tensor
      .toFloat()
      .div(tf.scalar(255.0))                            // Normalize pixel values to [0, 1]
      .resizeBilinear([inputShape[1], inputShape[2]])   // Resize to the model's input size
      .expandDims(0);                                   // Add batch dimension [1, 320, 320, 3]
    return tensor;
  };

  // Function to extract bounding boxes, class IDs, and scores from the model output.
  // Note: the output order and shapes depend on the exported model; some models
  // return a NamedTensorMap instead of an array, in which case index by output name.
  const extractPredictions = (output) => {
    const boxes = output[0].arraySync()[0];           // Bounding boxes, batch dim stripped
    const classes = output[1].arraySync()[0];         // Class IDs
    const scores = output[2].arraySync()[0];          // Confidence scores
    const numDetections = output[3].arraySync()[0];   // Number of detected objects
    return [boxes, classes, scores, numDetections];
  };
  return (
    <div>
      <h1>Object Detection with TFLite</h1>

      {/* Input: Upload Image */}
      <input type="file" accept="image/*" onChange={handleImageChange} />

      {/* Display Selected Image */}
      {imageUrl && (
        <div>
          <img id="inputImage" src={imageUrl} alt="Input" width="300px" />
        </div>
      )}

      {/* Run Inference Button */}
      <button onClick={runInference} disabled={!model}>
        Run Inference
      </button>

      {/* Display Predictions */}
      {predictions.length > 0 && (
        <div>
          <h2>Predictions:</h2>
          <ul>
            {predictions.map((pred, index) => (
              <li key={index}>
                {`Class: ${pred.class}, Confidence: ${pred.score.toFixed(2)}, Bounding Box: [${pred.bbox}]`}
              </li>
            ))}
          </ul>
        </div>
      )}
    </div>
  );
}

export default ObjectDetector;
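# For completeness, a minimal sketch of producing the .tflite file consumed by the
# React component above, using TensorFlow's standard converter. 'saved_model_dir'
# and 'model.tflite' are placeholder paths, not names from the original code.
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_dir')
converter.optimizations = [tf.lite.Optimize.DEFAULT]  # optional size/latency optimization
tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
    f.write(tflite_model)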
import json
import random
import os
# Load the COCO annotations file
coco_file = 'annotations.json' # Path to your COCO annotations file
output_dir = 'output_dir/' # Directory to save the split files
train_ratio = 0.8 # 80% for training, 20% for validation
# Create output directory if it doesn't exist
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
# Load COCO annotations
with open(coco_file, 'r') as f:
    coco_data = json.load(f)
# Extract images and annotations
images = coco_data['images']
annotations = coco_data['annotations']
# Shuffle images to ensure random split
random.shuffle(images)
# Split images into training and validation sets
train_size = int(len(images) * train_ratio)
train_images = images[:train_size]
val_images = images[train_size:]
# Create dictionaries to store image IDs for filtering annotations
train_image_ids = {img['id'] for img in train_images}
val_image_ids = {img['id'] for img in val_images}
# Split annotations based on image IDs
train_annotations = [ann for ann in annotations if ann['image_id'] in train_image_ids]
val_annotations = [ann for ann in annotations if ann['image_id'] in val_image_ids]
# Create train and validation splits for COCO format
train_data = {
'images': train_images,
'annotations': train_annotations,
'categories': coco_data['categories'], # Keep categories the same
}
val_data = {
'images': val_images,
'annotations': val_annotations,
'categories': coco_data['categories'], # Keep categories the same
}
# Save the new train and validation annotation files
train_file = os.path.join(output_dir, 'train_annotations.json')
val_file = os.path.join(output_dir, 'val_annotations.json')
with open(train_file, 'w') as f:
    json.dump(train_data, f)

with open(val_file, 'w') as f:
    json.dump(val_data, f)
print(f"Train annotations saved to: {train_file}")
print(f"Validation annotations saved to: {val_file}")