ford442 committed
Commit ee4cc69 · verified · 1 parent: 95b472b

Update app.py

Files changed (1)
  1. app.py +0 -78
app.py CHANGED
@@ -53,69 +53,6 @@ request_log = []
 clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path)
 clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path)
 
-if not os.path.exists(LOG_FILE_PATH):
-    with open(LOG_FILE_PATH, "w", newline="") as f:
-        writer = csv.writer(f)
-        writer.writerow(
-            [
-                "timestamp",
-                "request_type",
-                "prompt",
-                "negative_prompt",
-                "height",
-                "width",
-                "num_frames",
-                "frame_rate",
-                "seed",
-                "num_inference_steps",
-                "guidance_scale",
-                "is_enhanced",
-                "clip_embedding",
-                "original_resolution",
-            ]
-        )
-
-@lru_cache(maxsize=128)
-def log_request(
-    request_type,
-    prompt,
-    negative_prompt,
-    height,
-    width,
-    num_frames,
-    frame_rate,
-    seed,
-    num_inference_steps,
-    guidance_scale,
-    is_enhanced,
-    clip_embedding=None,
-    original_resolution=None,
-):
-    """Log the user's request to a CSV file."""
-    timestamp = datetime.now().isoformat()
-    with open(LOG_FILE_PATH, "a", newline="") as f:
-        try:
-            writer = csv.writer(f)
-            writer.writerow(
-                [
-                    timestamp,
-                    request_type,
-                    prompt,
-                    negative_prompt,
-                    height,
-                    width,
-                    num_frames,
-                    frame_rate,
-                    seed,
-                    num_inference_steps,
-                    guidance_scale,
-                    is_enhanced,
-                    clip_embedding,
-                    original_resolution,
-                ]
-            )
-        except Exception as e:
-            print(f"Error logging request: {e}")
 
 def compute_clip_embedding(text=None, image=None):
     """
@@ -400,21 +337,6 @@ def generate_video_from_image(
     original_resolution = f"{img.width}x{img.height}"  # Format as "widthxheight"
     clip_embedding = compute_clip_embedding(image=img)
 
-    log_request(
-        "img2vid",
-        prompt,
-        negative_prompt,
-        height,
-        width,
-        num_frames,
-        frame_rate,
-        seed,
-        num_inference_steps,
-        guidance_scale,
-        enhance_prompt_toggle,
-        json.dumps(clip_embedding),
-        original_resolution,
-    )
 
     media_items = load_image_to_tensor_with_resize(image_path, height, width).to(device).detach()
 
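
The context around this hunk calls `compute_clip_embedding(image=img)`, whose body sits in an unchanged part of app.py and is not visible in this diff. Given the `CLIPModel`/`CLIPProcessor` pair loaded in the first hunk's context, it plausibly reduces to the sketch below; treat the exact body, the 512-dimension check, and the list return type as assumptions rather than the file's actual implementation:

```python
# Hypothetical reconstruction of compute_clip_embedding -- the real body is
# outside this diff. Uses only documented transformers APIs.
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def compute_clip_embedding(text=None, image=None):
    """Return a CLIP embedding (plain list) for either a text or an image."""
    if image is not None:
        inputs = clip_processor(images=image, return_tensors="pt")
        with torch.no_grad():
            features = clip_model.get_image_features(**inputs)
    elif text is not None:
        inputs = clip_processor(text=[text], return_tensors="pt", padding=True)
        with torch.no_grad():
            features = clip_model.get_text_features(**inputs)
    else:
        raise ValueError("pass text= or image=")
    # clip-vit-base-patch32 projects to 512 dims; a plain list serializes
    # with json.dumps, matching the removed log_request call site.
    return features[0].tolist()

img = Image.new("RGB", (224, 224))  # placeholder image
clip_embedding = compute_clip_embedding(image=img)
assert len(clip_embedding) == 512
```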