shacharrosenman committed · Commit 5101535 · verified · 1 Parent(s): 2fa7714

Create README.md

Files changed (1): README.md (+118, -0)
README.md ADDED
# NeuroPrompts Model Card

NeuroPrompts is an interface to Stable Diffusion that automatically optimizes a user's prompt for improved image aesthetics while maintaining stylistic control according to the user's preferences.

Preprint: [arxiv.org/abs/2311.12229](https://arxiv.org/abs/2311.12229)

NeuroPrompts was accepted to EACL 2024.

## The interface of NeuroPrompts in side-by-side comparison mode

![NeuroPrompts overview](images/comparisons_interface.png)
## Usage

Install the dependencies:

```shell
pip install torch torchvision gradio==3.39.0 transformers diffusers flair==0.12.2 numpy tqdm webdataset pytorch_lightning datasets openai-clip scipy==1.10.1
```
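The usage example below assumes a CUDA-capable GPU and a local checkout of the repository, since it imports `generate_neurologic` from the `neurologic/` directory. An optional sanity check before running it:

```python
import torch

# The usage script moves the model to 'cuda', so a GPU must be visible to PyTorch.
assert torch.cuda.is_available(), "NeuroPrompts' usage example expects a CUDA-capable GPU"
```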
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import sys
import os
# from categories import styles_list, artists_list, formats_list, perspective_list, booster_list, vibe_list

# Set environment variables and PyTorch configurations for deterministic decoding
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)

# Make the 'neurologic' module (shipped with the repository) importable
neurologic_path = os.path.abspath('neurologic/')
os.environ['NEUROLOGIC_PATH'] = neurologic_path
sys.path.insert(0, neurologic_path)
from neurologic_pe import generate_neurologic

# Load the pre-trained prompt-optimizer model and tokenizer
model_name = "/home/srosenma/src/test_45"  # replace with the path to your fine-tuned checkpoint
model_type = 'finetuned'
# model_type = 'ppo'
rand_seed = 1535471403  # fixed seed (can be passed as `seed=` below for reproducible decoding)

model = AutoModelForCausalLM.from_pretrained(model_name).to('cuda')
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token

# Set the inference parameters
length_penalty = 1.0
max_length = 77       # matches the CLIP text-encoder token limit used by Stable Diffusion
beam_size = 5
inference_steps = 25  # not used in this snippet

# Initialize the user's stylistic constraints (leave as None / "" for unconstrained optimization)
curr_input_artist = None
curr_input_style = None
curr_input_format = None
curr_input_perspective = None
curr_input_booster = None
curr_input_vibe = None
curr_input_negative = ""

# Set the plain-text input prompt
plain_text = "A boy and his dog"

# Construct the positive constraints: each selected keyword must appear in the
# optimized prompt, in either lower-case or title-case form
constraints = []
for clause in [curr_input_artist, curr_input_style, curr_input_format, curr_input_perspective, curr_input_booster, curr_input_vibe]:
    if clause is not None and len(clause) > 0:
        constraints.append([clause.lower(), clause.title()])

print(f"Positive constraints: {constraints}")

# Construct the negative constraints: comma-separated terms that must not appear
neg_constraints = []
neg_inputs = [i.strip() for i in curr_input_negative.split(',')]
for clause in neg_inputs:
    if clause is not None and len(clause) > 0:
        neg_constraints += [clause.lower(), clause.title()]

print(f"Negative constraints: {neg_constraints}")

# Generate the optimized prompt with constrained (NeuroLogic) decoding
res = generate_neurologic(plain_text,
                          model=model,
                          tokenizer=tokenizer,
                          model_type=model_type,
                          constraint_method='clusters',
                          clusters_file='/home/philliph/mcai/mm-counterfactuals/prompt_engineering/template_keywords.json',  # replace with your local copy of template_keywords.json
                          user_constraints=constraints if len(constraints) > 0 else None,
                          negative_constraints=neg_constraints if len(neg_constraints) > 0 else None,
                          length_penalty=float(length_penalty),
                          max_tgt_length=int(max_length),
                          beam_size=int(beam_size),
                          num_return_sequences=int(beam_size),
                          ngram_size=2,
                          n_per_cluster=1,
                          seed=None)[0][0]

# Print the optimized prompt
print(f"\nResult:\n{res}")
```
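Once you have the optimized prompt, you can render it with any Stable Diffusion checkpoint through the `diffusers` library installed above. The sketch below is illustrative and not part of the NeuroPrompts codebase: the checkpoint ID (`runwayml/stable-diffusion-v1-5`) is an assumed choice, and reusing `inference_steps` and `rand_seed` from the script above is optional.

```python
# Illustrative sketch (assumption): render the optimized prompt `res` with a
# Stable Diffusion checkpoint via diffusers. The checkpoint ID is an example
# choice, not necessarily the one used by the NeuroPrompts authors.
from diffusers import StableDiffusionPipeline

# Relax the strict determinism flag set earlier; some diffusion ops may lack deterministic kernels.
torch.use_deterministic_algorithms(False)

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed checkpoint; swap in your preferred SD model
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    res,                                   # the optimized prompt produced above
    num_inference_steps=inference_steps,   # 25, as set in the usage script
    generator=torch.Generator("cuda").manual_seed(rand_seed),  # optional: reproducible sampling
).images[0]
image.save("optimized_prompt_sample.png")
```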