Nekshay committed on
Commit de86909 · verified · 1 Parent(s): 31982cc

Update tempo.txt

Files changed (1)
  1. tempo.txt +64 -48
tempo.txt CHANGED
@@ -1,48 +1,64 @@
- import cv2
- import numpy as np
-
- def detect_half_circles(binary_image, threshold=0.8):
-     # Convert binary image to BGR for drawing colored shapes
-     output_image = cv2.cvtColor(binary_image, cv2.COLOR_GRAY2BGR)
-
-     # Find contours in the binary image
-     contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-
-     detected_shapes = []
-
-     for contour in contours:
-         # Calculate the perimeter of the contour
-         perimeter = cv2.arcLength(contour, True)
-
-         # Fit an ellipse to the contour
-         if len(contour) >= 5:  # Need at least 5 points to fit an ellipse
-             ellipse = cv2.fitEllipse(contour)
-             (x, y), (MA, ma), angle = ellipse
-
-             # Check if the major axis is approximately twice the minor axis (indicating a half-circle)
-             if 0.8 < (MA / ma) < 1.2:  # This range can be adjusted based on specific requirements
-                 aspect_ratio = float(MA) / ma
-
-                 # Check if the aspect ratio is approximately 2:1 (indicating a half-circle shape)
-                 if 0.4 < aspect_ratio < 0.6:  # This range can also be adjusted
-                     detected_shapes.append(contour)
-
-                     # Draw the contour with a different color
-                     cv2.drawContours(output_image, [contour], -1, (0, 0, 255), 2)
-
-     return output_image, detected_shapes
-
- # Example usage
- # Load the binary masked image (make sure it's a binary image)
- binary_image = cv2.imread('path_to_your_image.png', cv2.IMREAD_GRAYSCALE)
-
- # Threshold the image to make sure it's binary
- _, binary_image = cv2.threshold(binary_image, 127, 255, cv2.THRESH_BINARY)
-
- # Detect half-circles and draw contours
- output_image, detected_shapes = detect_half_circles(binary_image)
-
- # Show the result
- cv2.imshow('Detected Half-Circles', output_image)
- cv2.waitKey(0)
- cv2.destroyAllWindows()
+ import torch
+ from torch import nn
+ from torch.utils.data import DataLoader
+
+ # Hyperparameters
+ image_size = (224, 224, 3)  # Adjust based on your data
+
+ # Define the Generator Network
+ class Generator(nn.Module):
+     def __init__(self):
+         super(Generator, self).__init__()
+         # Define convolutional layers with appropriate filters and activations
+         self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
+         # ... Add more convolutional layers as needed
+         self.conv_final = nn.Conv2d(128, 3, kernel_size=3, stride=1, padding=1)  # Tanh for shadow intensity is applied in forward
+
+     def forward(self, x):
+         # Define the forward pass through the convolutional layers
+         x = self.conv1(x)
+         # ... Forward pass through remaining convolutional layers
+         return torch.tanh(self.conv_final(x))
+
+ # Define the Discriminator Network
+ class Discriminator(nn.Module):
+     def __init__(self):
+         super(Discriminator, self).__init__()
+         # Define convolutional layers with appropriate filters and activations
+         self.conv1 = nn.Conv2d(6, 32, kernel_size=3, stride=1, padding=1)
+         # ... Add more convolutional layers as needed
+         self.linear = nn.Linear(128, 1)  # Final layer; sigmoid is applied in forward
+
+     def forward(self, car, shadow):
+         # Concatenate car and shadow along the channel dimension
+         x = torch.cat([car, shadow], dim=1)
+         # Define the forward pass through the convolutional layers
+         x = self.conv1(x)
+         # ... Forward pass through remaining convolutional layers, then flatten to (batch, 128)
+         return torch.sigmoid(self.linear(x))
+
+ # Create data loaders for training and validation data
+ # ... (Implement data loading logic using PyTorch's DataLoader)
+
+ # Create the models
+ generator = Generator()
+ discriminator = Discriminator()
+
+ # Define loss function and optimizers
+ criterion = nn.BCELoss()
+ g_optimizer = torch.optim.Adam(generator.parameters(), lr=0.0002)
+ d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=0.0002)
+
+ # Training loop
+ for epoch in range(epochs):  # epochs should be defined with the other hyperparameters above
+     # Train the Discriminator
+     # ... (Implement discriminator training logic with loss calculation and updates)
+
+     # Train the Generator
+     # ... (Implement generator training logic with loss calculation and updates)
+
+     # Print training progress
+     # ... (Print loss values or other metrics)
+
+ # Save the trained generator
+ torch.save(generator.state_dict(), 'generator.pt')
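Note on the "(Implement data loading logic using PyTorch's DataLoader)" placeholder: one possible setup is sketched below, assuming paired car and ground-truth shadow images stored under matching filenames. The CarShadowDataset class, directory names, batch size, and torchvision transforms are illustrative assumptions, not part of the committed file; normalizing to [-1, 1] matches the Tanh range of the generator output.

# Hypothetical paired-image dataset; file layout and transforms are assumptions.
import os
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms

class CarShadowDataset(Dataset):
    def __init__(self, car_dir, shadow_dir, size=(224, 224)):
        self.car_dir, self.shadow_dir = car_dir, shadow_dir
        self.files = sorted(os.listdir(car_dir))
        self.transform = transforms.Compose([
            transforms.Resize(size),
            transforms.ToTensor(),                       # scales pixels to [0, 1]
            transforms.Normalize([0.5] * 3, [0.5] * 3),  # shifts to [-1, 1]
        ])

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        name = self.files[idx]
        car = Image.open(os.path.join(self.car_dir, name)).convert('RGB')
        shadow = Image.open(os.path.join(self.shadow_dir, name)).convert('RGB')
        return self.transform(car), self.transform(shadow)

dataloader = DataLoader(CarShadowDataset('cars/', 'shadows/'), batch_size=16, shuffle=True)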
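The "(Implement discriminator training logic ...)" and "(Implement generator training logic ...)" placeholders in the training loop could take roughly the following shape for a standard adversarial update. This is a minimal sketch, not the author's implementation: it assumes the dataloader sketched above yields (car, real_shadow) tensor pairs, reuses criterion, g_optimizer, and d_optimizer from the file, and uses a placeholder epochs value.

# Sketch of the GAN training loop; variable names are illustrative only.
epochs = 100  # placeholder value
for epoch in range(epochs):
    for car, real_shadow in dataloader:
        batch_size = car.size(0)
        real_labels = torch.ones(batch_size, 1)
        fake_labels = torch.zeros(batch_size, 1)

        # Train the Discriminator on real shadows and on generated (detached) shadows
        d_optimizer.zero_grad()
        fake_shadow = generator(car)
        d_loss_real = criterion(discriminator(car, real_shadow), real_labels)
        d_loss_fake = criterion(discriminator(car, fake_shadow.detach()), fake_labels)
        d_loss = d_loss_real + d_loss_fake
        d_loss.backward()
        d_optimizer.step()

        # Train the Generator so the Discriminator labels its shadows as real
        g_optimizer.zero_grad()
        g_loss = criterion(discriminator(car, fake_shadow), real_labels)
        g_loss.backward()
        g_optimizer.step()

    # Print training progress once per epoch
    print(f'epoch {epoch}: d_loss={d_loss.item():.4f}, g_loss={g_loss.item():.4f}')

Detaching fake_shadow in the discriminator step keeps the generator's gradients out of the discriminator update; the generator step then runs a fresh discriminator forward pass so gradients flow back into the generator alone.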