xinchen9 committed
Commit c696f0f · verified · 1 Parent(s): e16a52c

[Update] round back

Files changed (1)
  1. about.py +14 -5
about.py CHANGED
@@ -22,19 +22,28 @@ NUM_FEWSHOT = 0 # Change with your few shot
 
 
 # Your leaderboard name
-TITLE = """<h1 align="center" id="space-title"> Demo of AdvUnlearn</h1>"""
+TITLE = """<h1 align="center" id="space-title"> Demo of UnlearnDiffAtk</h1>"""
 
 # subtitle
-SUB_TITLE = """<h2 align="center" id="space-title">A robust unlearning framework </h1>"""
+SUB_TITLE = """<h2 align="center" id="space-title">Effective and efficient adversarial prompt generation approach for diffusion models</h2>"""
 
 # What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """
-AdvUnlearn is a robust unlearning framework. It aims to enhance the robustness of concept erasing by integrating
-the principle of adversarial training (AT) into machine unlearning and also achieves a balanced tradeoff with model utility. For details, please
-read the [paper](https://arxiv.org/abs/2405.15234) and check the [code](https://github.com/OPTML-Group/AdvUnlearn).
+UnlearnDiffAtk is an effective and efficient adversarial prompt generation approach for unlearned diffusion models (DMs). For more details,
+please refer to the [benchmark of UnlearnDiffAtk](https://huggingface.co/spaces/xinchen9/UnlearnDiffAtk-Benchmark), visit the [project](https://www.optml-group.com/posts/mu_attack),
+check the [code](https://github.com/OPTML-Group/Diffusion-MU-Attack), and read the [paper](https://arxiv.org/abs/2310.11868).\\
+We validated the prompts for objects ([Church](https://github.com/OPTML-Group/Diffusion-MU-Attack/blob/e848ddd19df1f86d08e08cc9146f8a2bb126da12/prompts/church.csv),
+[Garbage Truck](https://github.com/OPTML-Group/Diffusion-MU-Attack/blob/e848ddd19df1f86d08e08cc9146f8a2bb126da12/prompts/garbage_truck.csv),
+[Parachute](https://github.com/OPTML-Group/Diffusion-MU-Attack/blob/e848ddd19df1f86d08e08cc9146f8a2bb126da12/prompts/parachute.csv),
+[Tench](https://github.com/OPTML-Group/Diffusion-MU-Attack/blob/e848ddd19df1f86d08e08cc9146f8a2bb126da12/prompts/tench.csv)),
+style ([Van Gogh](https://github.com/OPTML-Group/Diffusion-MU-Attack/blob/e848ddd19df1f86d08e08cc9146f8a2bb126da12/prompts/vangogh.csv)),
+and undesirable concepts ([Nudity](https://github.com/OPTML-Group/Diffusion-MU-Attack/blob/e848ddd19df1f86d08e08cc9146f8a2bb126da12/prompts/nudity.csv),
+[Illegal Activity](https://github.com/OPTML-Group/Diffusion-MU-Attack/blob/e848ddd19df1f86d08e08cc9146f8a2bb126da12/prompts/illegal.csv),
+[Violence](https://github.com/OPTML-Group/Diffusion-MU-Attack/blob/e848ddd19df1f86d08e08cc9146f8a2bb126da12/prompts/violence.csv)).
 
 """
 
+
 # Which evaluations are you running? how can people reproduce what you have?
 LLM_BENCHMARKS_TEXT = f"""
 ## How it works
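
For context, these module-level constants are typically rendered by the Space's Gradio app. Below is a minimal usage sketch under that assumption; the app code is not part of this commit, so everything here except TITLE, SUB_TITLE, and INTRODUCTION_TEXT (which come from about.py) is hypothetical.

# Minimal sketch (assumption, not part of this commit): a typical Gradio
# leaderboard app renders the about.py constants roughly like this.
import gradio as gr

from about import INTRODUCTION_TEXT, SUB_TITLE, TITLE

with gr.Blocks() as demo:
    gr.HTML(TITLE)                  # <h1> heading defined in about.py
    gr.HTML(SUB_TITLE)              # <h2> subtitle defined in about.py
    gr.Markdown(INTRODUCTION_TEXT)  # markdown links to benchmark, project, code, paper

if __name__ == "__main__":
    demo.launch()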