srijaydeshpande committed on
Commit f59f99a · verified · 1 Parent(s): f912845

Update app.py

Files changed (1)
  1. app.py +374 -372
app.py CHANGED
@@ -1,270 +1,101 @@
- from pdfminer.high_level import extract_pages
- from pdfminer.layout import LTTextContainer
- from tqdm import tqdm
- import re
- import gradio as gr
- import os
- import accelerate
- import spaces
- import subprocess
- from huggingface_hub import hf_hub_download
- from llama_cpp import Llama
- from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
- from llama_cpp_agent.providers import LlamaCppPythonProvider
- from llama_cpp_agent.chat_history import BasicChatHistory
- from llama_cpp_agent.chat_history.messages import Roles
-
- # subprocess.run('pip install llama-cpp-python==0.2.75 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124', shell=True)
- # subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
-
-
- # hf_hub_download(
- #     repo_id="QuantFactory/Meta-Llama-3-8B-Instruct-GGUF",
- #     filename="Meta-Llama-3-8B-Instruct.Q8_0.gguf",
- #     local_dir = "./models"
- # )
-
- hf_hub_download(
-     repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
-     filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
-     local_dir = "./models"
- )
-
- def process_document(pdf_path, page_ids=None):
-     extracted_pages = extract_pages(pdf_path, page_numbers=page_ids)
-
-     page2content = {}
-
-     for extracted_page in tqdm(extracted_pages):
-         page_id = extracted_page.pageid
-         content = process_page(extracted_page)
-         page2content[page_id] = content
-
-     return page2content
-
-
- def process_page(extracted_page):
-     content = []
-     elements = [element for element in extracted_page._objs]
-     elements.sort(key=lambda a: a.y1, reverse=True)
-     for i, element in enumerate(elements):
-         if isinstance(element, LTTextContainer):
-             line_text = extract_text_and_normalize(element)
-             content.append(line_text)
-     content = re.sub('\n+', ' ', ''.join(content))
-     return content
-
-
- def extract_text_and_normalize(element):
-     # Extract text from line and split it with new lines
-     line_texts = element.get_text().split('\n')
-     norm_text = ''
-     for line_text in line_texts:
-         line_text = line_text.strip()
-         if not line_text:
-             line_text = '\n'
-         else:
-             line_text = re.sub('\s+', ' ', line_text)
-             if not re.search('[\w\d\,\-]', line_text[-1]):
-                 line_text += '\n'
-             else:
-                 line_text += ' '
-         norm_text += line_text
-     return norm_text
-
-
- def txt_to_html(text):
-     html_content = "<html><body>"
-     for line in text.split('\n'):
-         html_content += "<p>{}</p>".format(line.strip())
-     html_content += "</body></html>"
-     return html_content
-
-
- def deidentify_doc(pdftext, maxtokens, temperature, top_probability):
-     prompt = "In the following text replace any person name and any address with term [redacted], replace any Date of Birth and NHS number with term [redacted]"
-
-     # model_id = "models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf"
-     # # model = Llama(model_path=model_id, n_ctx=2048, n_threads=8, n_gpu_layers=-1, n_batch=128)
-     # model = Llama(
-     #     model_path=model_id,
-     #     flash_attn=True,
-     #     n_gpu_layers=81,
-     #     n_batch=1024,
-     #     n_ctx=8192,
-     # )
-
-     llm = Llama(
-         model_path="models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
-         flash_attn=True,
-         n_gpu_layers=81,
-         n_batch=1024,
-         n_ctx=8192,
-     )
-     provider = LlamaCppPythonProvider(llm)
-
-     agent = LlamaCppAgent(
-         provider,
-     )
-
-     settings = provider.get_provider_default_settings()
-     settings.temperature = 0.7
-     settings.top_k = 40
-     settings.top_p = 0.95
-     settings.max_tokens = 2048
-     settings.repeat_penalty = 1.1
-     settings.stream = True
-
-
-     stream = agent.get_chat_response(
-         prompt + ' : ' + pdftext,
-         llm_sampling_settings=settings,
-         returns_streaming_generator=True,
-         print_output=False
-     )
-
-     outputs = ""
-     for output in stream:
-         outputs += output
-         yield outputs
-
-     # output = model.create_chat_completion(
-     #     messages=[
-     #         {"role": "assistant", "content": prompt},
-     #         {
-     #             "role": "user",
-     #             "content": pdftext
-     #         }
-     #     ],
-     #     max_tokens=maxtokens,
-     #     temperature=temperature
-     # )
-     # output = output['choices'][0]['message']['content']
-
-     # prompt = "Perform the following actions on given text: 1. Replace any person age with term [redacted] 2. DO NOT REPLACE ANY MEDICAL MEASUREMENTS 3. Replace only the CALENDAR DATES of format 'day/month/year' with term [redacted]"
-     # output = model.create_chat_completion(
-     #     messages=[
-     #         {"role": "assistant", "content": prompt},
-     #         {
-     #             "role": "user",
-     #             "content": output
-     #         }
-     #     ],
-     #     max_tokens=maxtokens,
-     #     temperature=temperature
-     # )
-     # output = output['choices'][0]['message']['content']
-
-     # print(prompt)
-     # print(output)
-     # print('-------------------------------------------------------')
-
-     # return outputs
-
- @spaces.GPU(duration=120)
- def pdf_to_text(files, maxtokens=2048, temperature=0, top_probability=0.95):
-     files = [files]  # remove later
-     for file in files:
-         file_name = os.path.basename(file)
-         file_name_splt = file_name.split('.')
-         # print('File name is ', file_name)
-         if (len(file_name_splt) > 1 and file_name_splt[1] == 'pdf'):
-             page2content = process_document(file, page_ids=[0])
-             pdftext = page2content[1]
-         # pdftext = file  # remove later
-         if (pdftext):  # shift this if block to right later
-             anonymized_text = deidentify_doc(pdftext, maxtokens, temperature, top_probability)
-             return anonymized_text
-
-
- css = ".gradio-container {background: 'logo.png'}"
- temp_slider = gr.Slider(minimum=0, maximum=2, value=0.9, label="Temperature Value")
- prob_slider = gr.Slider(minimum=0, maximum=1, value=0.95, label="Max Probability Value")
- max_tokens = gr.Number(value=600, label="Max Tokens")
- input_folder = gr.File(file_count='multiple')
- input_folder_text = gr.Textbox(label='Enter output folder path')
- output_text = gr.Textbox()
- output_path_component = gr.File(label="Select Output Path")
- iface = gr.Interface(
-     fn=pdf_to_text,
-     inputs='file',
-     # inputs=["textbox", input_folder_text, "textbox", max_tokens, temp_slider, prob_slider],
-     outputs=output_text,
-     title='COBIx Endoscopy Report De-Identification',
-     description="This application assists to remove personal information from the uploaded clinical report",
-     theme=gr.themes.Soft(),
- )
- iface.launch()
-
  # import spaces
- # import json
  # import subprocess
  # from llama_cpp import Llama
  # from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
  # from llama_cpp_agent.providers import LlamaCppPythonProvider
  # from llama_cpp_agent.chat_history import BasicChatHistory
  # from llama_cpp_agent.chat_history.messages import Roles
- # import gradio as gr
- # from huggingface_hub import hf_hub_download

  # hf_hub_download(
  #     repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
  #     filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
  #     local_dir = "./models"
  # )
- # # hf_hub_download(
- # #     repo_id="bartowski/Mistral-7B-Instruct-v0.3-GGUF",
- # #     filename="Mistral-7B-Instruct-v0.3-f32.gguf",
- # #     local_dir = "./models"
- # # )
-
- # css = """
- # .message-row {
- #     justify-content: space-evenly !important;
- # }
- # .message-bubble-border {
- #     border-radius: 6px !important;
- # }
- # .message-buttons-bot, .message-buttons-user {
- #     right: 10px !important;
- #     left: auto !important;
- #     bottom: 2px !important;
- # }
- # .dark.message-bubble-border {
- #     border-color: #343140 !important;
- # }
- # .dark.user {
- #     background: #1e1c26 !important;
- # }
- # .dark.assistant.dark, .dark.pending.dark {
- #     background: #16141c !important;
- # }
- # """
-
- # def get_messages_formatter_type(model_name):
- #     if "Llama" in model_name:
- #         return MessagesFormatterType.LLAMA_3
- #     elif "Mistral" in model_name:
- #         return MessagesFormatterType.MISTRAL
- #     else:
- #         raise ValueError(f"Unsupported model: {model_name}")

- # @spaces.GPU(duration=120)
- # def respond(
- #     message,
- #     history: list[tuple[str, str]],
- #     model,
- #     system_message,
- #     max_tokens,
- #     temperature,
- #     top_p,
- #     top_k,
- #     repeat_penalty,
- # ):
- #     chat_template = get_messages_formatter_type(model)

  #     llm = Llama(
- #         model_path=f"models/{model}",
  #         flash_attn=True,
  #         n_gpu_layers=81,
  #         n_batch=1024,
@@ -274,37 +105,20 @@ iface.launch()

  #     agent = LlamaCppAgent(
  #         provider,
- #         system_prompt=f"{system_message}",
- #         predefined_messages_formatter_type=chat_template,
- #         debug_output=True
  #     )

  #     settings = provider.get_provider_default_settings()
- #     settings.temperature = temperature
- #     settings.top_k = top_k
- #     settings.top_p = top_p
- #     settings.max_tokens = max_tokens
- #     settings.repeat_penalty = repeat_penalty
  #     settings.stream = True

- #     messages = BasicChatHistory()
-
- #     for msn in history:
- #         user = {
- #             'role': Roles.user,
- #             'content': msn[0]
- #         }
- #         assistant = {
- #             'role': Roles.assistant,
- #             'content': msn[1]
- #         }
- #         messages.add_message(user)
- #         messages.add_message(assistant)

  #     stream = agent.get_chat_response(
- #         message,
  #         llm_sampling_settings=settings,
- #         chat_history=messages,
  #         returns_streaming_generator=True,
  #         print_output=False
  #     )
@@ -314,103 +128,291 @@ iface.launch()
  #         outputs += output
  #         yield outputs

- # PLACEHOLDER = """
- # <div class="message-bubble-border" style="display:flex; max-width: 600px; border-radius: 6px; border-width: 1px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); backdrop-filter: blur(10px);">
- #     <figure style="margin: 0;">
- #         <img src="https://huggingface.co/spaces/pabloce/llama-cpp-agent/resolve/main/llama.jpg" alt="Logo" style="width: 100%; height: 100%; border-radius: 8px;">
- #     </figure>
- #     <div style="padding: .5rem 1.5rem;">
- #         <h2 style="text-align: left; font-size: 1.5rem; font-weight: 700; margin-bottom: 0.5rem;">llama-cpp-agent</h2>
- #         <p style="text-align: left; font-size: 16px; line-height: 1.5; margin-bottom: 15px;">The llama-cpp-agent framework simplifies interactions with Large Language Models (LLMs), providing an interface for chatting, executing function calls, generating structured output, performing retrieval augmented generation, and processing text using agentic chains with tools.</p>
- #         <div style="display: flex; justify-content: space-between; align-items: center;">
- #             <div style="display: flex; flex-flow: column; justify-content: space-between;">
- #                 <span style="display: inline-flex; align-items: center; border-radius: 0.375rem; background-color: rgba(229, 70, 77, 0.1); padding: 0.1rem 0.75rem; font-size: 0.75rem; font-weight: 500; color: #f88181; margin-bottom: 2.5px;">
- #                     Mistral 7B Instruct v0.3
- #                 </span>
- #                 <span style="display: inline-flex; align-items: center; border-radius: 0.375rem; background-color: rgba(79, 70, 229, 0.1); padding: 0.1rem 0.75rem; font-size: 0.75rem; font-weight: 500; color: #60a5fa; margin-top: 2.5px;">
- #                     Meta Llama 3 70B Instruct
- #                 </span>
- #             </div>
- #             <div style="display: flex; justify-content: flex-end; align-items: center;">
- #                 <a href="https://discord.gg/sRMvWKrh" target="_blank" rel="noreferrer" style="padding: .5rem;">
- #                     <svg width="24" height="24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" viewBox="0 5 30.67 23.25">
- #                         <title>Discord</title>
- #                         <path d="M26.0015 6.9529C24.0021 6.03845 21.8787 5.37198 19.6623 5C19.3833 5.48048 19.0733 6.13144 18.8563 6.64292C16.4989 6.30193 14.1585 6.30193 11.8336 6.64292C11.6166 6.13144 11.2911 5.48048 11.0276 5C8.79575 5.37198 6.67235 6.03845 4.6869 6.9529C0.672601 12.8736 -0.41235 18.6548 0.130124 24.3585C2.79599 26.2959 5.36889 27.4739 7.89682 28.2489C8.51679 27.4119 9.07477 26.5129 9.55525 25.5675C8.64079 25.2265 7.77283 24.808 6.93587 24.312C7.15286 24.1571 7.36986 23.9866 7.57135 23.8161C12.6241 26.1255 18.0969 26.1255 23.0876 23.8161C23.3046 23.9866 23.5061 24.1571 23.7231 24.312C22.8861 24.808 22.0182 25.2265 21.1037 25.5675C21.5842 26.5129 22.1422 27.4119 22.7621 28.2489C25.2885 27.4739 27.8769 26.2959 30.5288 24.3585C31.1952 17.7559 29.4733 12.0212 26.0015 6.9529ZM10.2527 20.8402C8.73376 20.8402 7.49382 19.4608 7.49382 17.7714C7.49382 16.082 8.70276 14.7025 10.2527 14.7025C11.7871 14.7025 13.0425 16.082 13.0115 17.7714C13.0115 19.4608 11.7871 20.8402 10.2527 20.8402ZM20.4373 20.8402C18.9183 20.8402 17.6768 19.4608 17.6768 17.7714C17.6768 16.082 18.8873 14.7025 20.4373 14.7025C21.9717 14.7025 23.2271 16.082 23.1961 17.7714C23.1961 19.4608 21.9872 20.8402 20.4373 20.8402Z"></path>
- #                     </svg>
- #                 </a>
- #                 <a href="https://github.com/Maximilian-Winter/llama-cpp-agent" target="_blank" rel="noreferrer" style="padding: .5rem;">
- #                     <svg width="24" height="24" fill="currentColor" viewBox="3 3 18 18">
- #                         <title>GitHub</title>
- #                         <path d="M12 3C7.0275 3 3 7.12937 3 12.2276C3 16.3109 5.57625 19.7597 9.15374 20.9824C9.60374 21.0631 9.77249 20.7863 9.77249 20.5441C9.77249 20.3249 9.76125 19.5982 9.76125 18.8254C7.5 19.2522 6.915 18.2602 6.735 17.7412C6.63375 17.4759 6.19499 16.6569 5.8125 16.4378C5.4975 16.2647 5.0475 15.838 5.80124 15.8264C6.51 15.8149 7.01625 16.4954 7.18499 16.7723C7.99499 18.1679 9.28875 17.7758 9.80625 17.5335C9.885 16.9337 10.1212 16.53 10.38 16.2993C8.3775 16.0687 6.285 15.2728 6.285 11.7432C6.285 10.7397 6.63375 9.9092 7.20749 9.26326C7.1175 9.03257 6.8025 8.08674 7.2975 6.81794C7.2975 6.81794 8.05125 6.57571 9.77249 7.76377C10.4925 7.55615 11.2575 7.45234 12.0225 7.45234C12.7875 7.45234 13.5525 7.55615 14.2725 7.76377C15.9937 6.56418 16.7475 6.81794 16.7475 6.81794C17.2424 8.08674 16.9275 9.03257 16.8375 9.26326C17.4113 9.9092 17.76 10.7281 17.76 11.7432C17.76 15.2843 15.6563 16.0687 13.6537 16.2993C13.98 16.5877 14.2613 17.1414 14.2613 18.0065C14.2613 19.2407 14.25 20.2326 14.25 20.5441C14.25 20.7863 14.4188 21.0746 14.8688 20.9824C16.6554 20.364 18.2079 19.1866 19.3078 17.6162C20.4077 16.0457 20.9995 14.1611 21 12.2276C21 7.12937 16.9725 3 12 3Z"></path>
- #                     </svg>
- #                 </a>
- #             </div>
- #         </div>
- #     </div>
- # </div>
- # """
-
- # demo = gr.ChatInterface(
- #     respond,
- #     additional_inputs=[
- #         gr.Dropdown([
- #                 'Meta-Llama-3-70B-Instruct-Q3_K_M.gguf',
- #                 'Mistral-7B-Instruct-v0.3-f32.gguf'
- #             ],
- #             value="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
- #             label="Model"
- #         ),
- #         gr.Textbox(value="You are a helpful assistant.", label="System message"),
- #         gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
- #         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
- #         gr.Slider(
- #             minimum=0.1,
- #             maximum=1.0,
- #             value=0.95,
- #             step=0.05,
- #             label="Top-p",
- #         ),
- #         gr.Slider(
- #             minimum=0,
- #             maximum=100,
- #             value=40,
- #             step=1,
- #             label="Top-k",
- #         ),
- #         gr.Slider(
- #             minimum=0.0,
- #             maximum=2.0,
- #             value=1.1,
- #             step=0.1,
- #             label="Repetition penalty",
- #         ),
- #     ],
- #     theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet", neutral_hue="gray", font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]).set(
- #         body_background_fill_dark="#16141c",
- #         block_background_fill_dark="#16141c",
- #         block_border_width="1px",
- #         block_title_background_fill_dark="#1e1c26",
- #         input_background_fill_dark="#292733",
- #         button_secondary_background_fill_dark="#24212b",
- #         border_color_accent_dark="#343140",
- #         border_color_primary_dark="#343140",
- #         background_fill_secondary_dark="#16141c",
- #         color_accent_soft_dark="transparent",
- #         code_background_fill_dark="#292733",
- #     ),
- #     css=css,
- #     retry_btn="Retry",
- #     undo_btn="Undo",
- #     clear_btn="Clear",
- #     submit_btn="Send",
- #     description="Llama-cpp-agent: Chat multi llm selection",
- #     chatbot=gr.Chatbot(
- #         scale=1,
- #         placeholder=PLACEHOLDER,
- #         likeable=False,
- #         show_copy_button=True
- #     )
  # )

- # if __name__ == "__main__":
- #     demo.launch()
+ # from pdfminer.high_level import extract_pages
+ # from pdfminer.layout import LTTextContainer
+ # from tqdm import tqdm
+ # import re
+ # import gradio as gr
+ # import os
+ # import accelerate
  # import spaces
  # import subprocess
+ # from huggingface_hub import hf_hub_download
  # from llama_cpp import Llama
  # from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
  # from llama_cpp_agent.providers import LlamaCppPythonProvider
  # from llama_cpp_agent.chat_history import BasicChatHistory
  # from llama_cpp_agent.chat_history.messages import Roles
+
+ # # subprocess.run('pip install llama-cpp-python==0.2.75 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124', shell=True)
+ # # subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
+
+
+ # # hf_hub_download(
+ # #     repo_id="QuantFactory/Meta-Llama-3-8B-Instruct-GGUF",
+ # #     filename="Meta-Llama-3-8B-Instruct.Q8_0.gguf",
+ # #     local_dir = "./models"
+ # # )

  # hf_hub_download(
  #     repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
  #     filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
  #     local_dir = "./models"
  # )

+ # def process_document(pdf_path, page_ids=None):
+ #     extracted_pages = extract_pages(pdf_path, page_numbers=page_ids)
+
+ #     page2content = {}
+
+ #     for extracted_page in tqdm(extracted_pages):
+ #         page_id = extracted_page.pageid
+ #         content = process_page(extracted_page)
+ #         page2content[page_id] = content
+
+ #     return page2content
+
+
+ # def process_page(extracted_page):
+ #     content = []
+ #     elements = [element for element in extracted_page._objs]
+ #     elements.sort(key=lambda a: a.y1, reverse=True)
+ #     for i, element in enumerate(elements):
+ #         if isinstance(element, LTTextContainer):
+ #             line_text = extract_text_and_normalize(element)
+ #             content.append(line_text)
+ #     content = re.sub('\n+', ' ', ''.join(content))
+ #     return content
+
+
+ # def extract_text_and_normalize(element):
+ #     # Extract text from line and split it with new lines
+ #     line_texts = element.get_text().split('\n')
+ #     norm_text = ''
+ #     for line_text in line_texts:
+ #         line_text = line_text.strip()
+ #         if not line_text:
+ #             line_text = '\n'
+ #         else:
+ #             line_text = re.sub('\s+', ' ', line_text)
+ #             if not re.search('[\w\d\,\-]', line_text[-1]):
+ #                 line_text += '\n'
+ #             else:
+ #                 line_text += ' '
+ #         norm_text += line_text
+ #     return norm_text
+
+
+ # def txt_to_html(text):
+ #     html_content = "<html><body>"
+ #     for line in text.split('\n'):
+ #         html_content += "<p>{}</p>".format(line.strip())
+ #     html_content += "</body></html>"
+ #     return html_content
+
+
+ # def deidentify_doc(pdftext, maxtokens, temperature, top_probability):
+ #     prompt = "In the following text replace any person name and any address with term [redacted], replace any Date of Birth and NHS number with term [redacted]"
+
+ #     # model_id = "models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf"
+ #     # # model = Llama(model_path=model_id, n_ctx=2048, n_threads=8, n_gpu_layers=-1, n_batch=128)
+ #     # model = Llama(
+ #     #     model_path=model_id,
+ #     #     flash_attn=True,
+ #     #     n_gpu_layers=81,
+ #     #     n_batch=1024,
+ #     #     n_ctx=8192,
+ #     # )

  #     llm = Llama(
+ #         model_path="models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
  #         flash_attn=True,
  #         n_gpu_layers=81,
  #         n_batch=1024,

  #     agent = LlamaCppAgent(
  #         provider,
  #     )

  #     settings = provider.get_provider_default_settings()
+ #     settings.temperature = 0.7
+ #     settings.top_k = 40
+ #     settings.top_p = 0.95
+ #     settings.max_tokens = 2048
+ #     settings.repeat_penalty = 1.1
  #     settings.stream = True

  #     stream = agent.get_chat_response(
+ #         prompt + ' : ' + pdftext,
  #         llm_sampling_settings=settings,
  #         returns_streaming_generator=True,
  #         print_output=False
  #     )

  #         outputs += output
  #         yield outputs

+ #     # output = model.create_chat_completion(
+ #     #     messages=[
+ #     #         {"role": "assistant", "content": prompt},
+ #     #         {
+ #     #             "role": "user",
+ #     #             "content": pdftext
+ #     #         }
+ #     #     ],
+ #     #     max_tokens=maxtokens,
+ #     #     temperature=temperature
+ #     # )
+ #     # output = output['choices'][0]['message']['content']
+
+ #     # prompt = "Perform the following actions on given text: 1. Replace any person age with term [redacted] 2. DO NOT REPLACE ANY MEDICAL MEASUREMENTS 3. Replace only the CALENDAR DATES of format 'day/month/year' with term [redacted]"
+ #     # output = model.create_chat_completion(
+ #     #     messages=[
+ #     #         {"role": "assistant", "content": prompt},
+ #     #         {
+ #     #             "role": "user",
+ #     #             "content": output
+ #     #         }
+ #     #     ],
+ #     #     max_tokens=maxtokens,
+ #     #     temperature=temperature
+ #     # )
+ #     # output = output['choices'][0]['message']['content']
+
+ #     # print(prompt)
+ #     # print(output)
+ #     # print('-------------------------------------------------------')
+
+ #     # return outputs
+
+ # @spaces.GPU(duration=120)
+ # def pdf_to_text(files, maxtokens=2048, temperature=0, top_probability=0.95):
+ #     files = [files]  # remove later
+ #     for file in files:
+ #         file_name = os.path.basename(file)
+ #         file_name_splt = file_name.split('.')
+ #         # print('File name is ', file_name)
+ #         if (len(file_name_splt) > 1 and file_name_splt[1] == 'pdf'):
+ #             page2content = process_document(file, page_ids=[0])
+ #             pdftext = page2content[1]
+ #         # pdftext = file  # remove later
+ #         if (pdftext):  # shift this if block to right later
+ #             anonymized_text = deidentify_doc(pdftext, maxtokens, temperature, top_probability)
+ #             return anonymized_text
+
+
+ # css = ".gradio-container {background: 'logo.png'}"
+ # temp_slider = gr.Slider(minimum=0, maximum=2, value=0.9, label="Temperature Value")
+ # prob_slider = gr.Slider(minimum=0, maximum=1, value=0.95, label="Max Probability Value")
+ # max_tokens = gr.Number(value=600, label="Max Tokens")
+ # input_folder = gr.File(file_count='multiple')
+ # input_folder_text = gr.Textbox(label='Enter output folder path')
+ # output_text = gr.Textbox()
+ # output_path_component = gr.File(label="Select Output Path")
+ # iface = gr.Interface(
+ #     fn=pdf_to_text,
+ #     inputs='file',
+ #     # inputs=["textbox", input_folder_text, "textbox", max_tokens, temp_slider, prob_slider],
+ #     outputs=output_text,
+ #     title='COBIx Endoscopy Report De-Identification',
+ #     description="This application assists to remove personal information from the uploaded clinical report",
+ #     theme=gr.themes.Soft(),
  # )
+ # iface.launch()
+
+ import spaces
+ import json
+ import subprocess
+ from llama_cpp import Llama
+ from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
+ from llama_cpp_agent.providers import LlamaCppPythonProvider
+ from llama_cpp_agent.chat_history import BasicChatHistory
+ from llama_cpp_agent.chat_history.messages import Roles
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+
+ hf_hub_download(
+     repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
+     filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
+     local_dir = "./models"
+ )
+ # hf_hub_download(
+ #     repo_id="bartowski/Mistral-7B-Instruct-v0.3-GGUF",
+ #     filename="Mistral-7B-Instruct-v0.3-f32.gguf",
+ #     local_dir = "./models"
+ # )
+
+ css = """
+ .message-row {
+     justify-content: space-evenly !important;
+ }
+ .message-bubble-border {
+     border-radius: 6px !important;
+ }
+ .message-buttons-bot, .message-buttons-user {
+     right: 10px !important;
+     left: auto !important;
+     bottom: 2px !important;
+ }
+ .dark.message-bubble-border {
+     border-color: #343140 !important;
+ }
+ .dark.user {
+     background: #1e1c26 !important;
+ }
+ .dark.assistant.dark, .dark.pending.dark {
+     background: #16141c !important;
+ }
+ """
+
+ def get_messages_formatter_type(model_name):
+     if "Llama" in model_name:
+         return MessagesFormatterType.LLAMA_3
+     elif "Mistral" in model_name:
+         return MessagesFormatterType.MISTRAL
+     else:
+         raise ValueError(f"Unsupported model: {model_name}")
+
+ @spaces.GPU(duration=120)
+ def respond(
+     message,
+     history: list[tuple[str, str]],
+     model,
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+     top_k,
+     repeat_penalty,
+ ):
+     chat_template = get_messages_formatter_type(model)
+
+     llm = Llama(
+         model_path=f"models/{model}",
+         flash_attn=True,
+         n_gpu_layers=81,
+         n_batch=1024,
+         n_ctx=8192,
+     )
+     provider = LlamaCppPythonProvider(llm)
+
+     agent = LlamaCppAgent(
+         provider,
+         system_prompt=f"{system_message}",
+         predefined_messages_formatter_type=chat_template,
+         debug_output=True
+     )
+
+     settings = provider.get_provider_default_settings()
+     settings.temperature = temperature
+     settings.top_k = top_k
+     settings.top_p = top_p
+     settings.max_tokens = max_tokens
+     settings.repeat_penalty = repeat_penalty
+     settings.stream = True
+
+     messages = BasicChatHistory()
+
+     for msn in history:
+         print('MSN 0 is ', msn[0])
+         user = {
+             'role': Roles.user,
+             'content': msn[0]
+         }
+         print('MSN 1 is ', msn[1])
+         assistant = {
+             'role': Roles.assistant,
+             'content': msn[1]
+         }
+         messages.add_message(user)
+         messages.add_message(assistant)
+     print('MESSAGE IS ', message)
+     stream = agent.get_chat_response(
+         message,
+         llm_sampling_settings=settings,
+         chat_history=messages,
+         returns_streaming_generator=True,
+         print_output=False
+     )
+
+     outputs = ""
+     for output in stream:
+         outputs += output
+         yield outputs
+
+ PLACEHOLDER = """
+ <div class="message-bubble-border" style="display:flex; max-width: 600px; border-radius: 6px; border-width: 1px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); backdrop-filter: blur(10px);">
+     <figure style="margin: 0;">
+         <img src="https://huggingface.co/spaces/pabloce/llama-cpp-agent/resolve/main/llama.jpg" alt="Logo" style="width: 100%; height: 100%; border-radius: 8px;">
+     </figure>
+     <div style="padding: .5rem 1.5rem;">
+         <h2 style="text-align: left; font-size: 1.5rem; font-weight: 700; margin-bottom: 0.5rem;">llama-cpp-agent</h2>
+         <p style="text-align: left; font-size: 16px; line-height: 1.5; margin-bottom: 15px;">The llama-cpp-agent framework simplifies interactions with Large Language Models (LLMs), providing an interface for chatting, executing function calls, generating structured output, performing retrieval augmented generation, and processing text using agentic chains with tools.</p>
+         <div style="display: flex; justify-content: space-between; align-items: center;">
+             <div style="display: flex; flex-flow: column; justify-content: space-between;">
+                 <span style="display: inline-flex; align-items: center; border-radius: 0.375rem; background-color: rgba(229, 70, 77, 0.1); padding: 0.1rem 0.75rem; font-size: 0.75rem; font-weight: 500; color: #f88181; margin-bottom: 2.5px;">
+                     Mistral 7B Instruct v0.3
+                 </span>
+                 <span style="display: inline-flex; align-items: center; border-radius: 0.375rem; background-color: rgba(79, 70, 229, 0.1); padding: 0.1rem 0.75rem; font-size: 0.75rem; font-weight: 500; color: #60a5fa; margin-top: 2.5px;">
+                     Meta Llama 3 70B Instruct
+                 </span>
+             </div>
+             <div style="display: flex; justify-content: flex-end; align-items: center;">
+                 <a href="https://discord.gg/sRMvWKrh" target="_blank" rel="noreferrer" style="padding: .5rem;">
+                     <svg width="24" height="24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" viewBox="0 5 30.67 23.25">
+                         <title>Discord</title>
+                         <path d="M26.0015 6.9529C24.0021 6.03845 21.8787 5.37198 19.6623 5C19.3833 5.48048 19.0733 6.13144 18.8563 6.64292C16.4989 6.30193 14.1585 6.30193 11.8336 6.64292C11.6166 6.13144 11.2911 5.48048 11.0276 5C8.79575 5.37198 6.67235 6.03845 4.6869 6.9529C0.672601 12.8736 -0.41235 18.6548 0.130124 24.3585C2.79599 26.2959 5.36889 27.4739 7.89682 28.2489C8.51679 27.4119 9.07477 26.5129 9.55525 25.5675C8.64079 25.2265 7.77283 24.808 6.93587 24.312C7.15286 24.1571 7.36986 23.9866 7.57135 23.8161C12.6241 26.1255 18.0969 26.1255 23.0876 23.8161C23.3046 23.9866 23.5061 24.1571 23.7231 24.312C22.8861 24.808 22.0182 25.2265 21.1037 25.5675C21.5842 26.5129 22.1422 27.4119 22.7621 28.2489C25.2885 27.4739 27.8769 26.2959 30.5288 24.3585C31.1952 17.7559 29.4733 12.0212 26.0015 6.9529ZM10.2527 20.8402C8.73376 20.8402 7.49382 19.4608 7.49382 17.7714C7.49382 16.082 8.70276 14.7025 10.2527 14.7025C11.7871 14.7025 13.0425 16.082 13.0115 17.7714C13.0115 19.4608 11.7871 20.8402 10.2527 20.8402ZM20.4373 20.8402C18.9183 20.8402 17.6768 19.4608 17.6768 17.7714C17.6768 16.082 18.8873 14.7025 20.4373 14.7025C21.9717 14.7025 23.2271 16.082 23.1961 17.7714C23.1961 19.4608 21.9872 20.8402 20.4373 20.8402Z"></path>
+                     </svg>
+                 </a>
+                 <a href="https://github.com/Maximilian-Winter/llama-cpp-agent" target="_blank" rel="noreferrer" style="padding: .5rem;">
+                     <svg width="24" height="24" fill="currentColor" viewBox="3 3 18 18">
+                         <title>GitHub</title>
+                         <path d="M12 3C7.0275 3 3 7.12937 3 12.2276C3 16.3109 5.57625 19.7597 9.15374 20.9824C9.60374 21.0631 9.77249 20.7863 9.77249 20.5441C9.77249 20.3249 9.76125 19.5982 9.76125 18.8254C7.5 19.2522 6.915 18.2602 6.735 17.7412C6.63375 17.4759 6.19499 16.6569 5.8125 16.4378C5.4975 16.2647 5.0475 15.838 5.80124 15.8264C6.51 15.8149 7.01625 16.4954 7.18499 16.7723C7.99499 18.1679 9.28875 17.7758 9.80625 17.5335C9.885 16.9337 10.1212 16.53 10.38 16.2993C8.3775 16.0687 6.285 15.2728 6.285 11.7432C6.285 10.7397 6.63375 9.9092 7.20749 9.26326C7.1175 9.03257 6.8025 8.08674 7.2975 6.81794C7.2975 6.81794 8.05125 6.57571 9.77249 7.76377C10.4925 7.55615 11.2575 7.45234 12.0225 7.45234C12.7875 7.45234 13.5525 7.55615 14.2725 7.76377C15.9937 6.56418 16.7475 6.81794 16.7475 6.81794C17.2424 8.08674 16.9275 9.03257 16.8375 9.26326C17.4113 9.9092 17.76 10.7281 17.76 11.7432C17.76 15.2843 15.6563 16.0687 13.6537 16.2993C13.98 16.5877 14.2613 17.1414 14.2613 18.0065C14.2613 19.2407 14.25 20.2326 14.25 20.5441C14.25 20.7863 14.4188 21.0746 14.8688 20.9824C16.6554 20.364 18.2079 19.1866 19.3078 17.6162C20.4077 16.0457 20.9995 14.1611 21 12.2276C21 7.12937 16.9725 3 12 3Z"></path>
+                     </svg>
+                 </a>
+             </div>
+         </div>
+     </div>
+ </div>
+ """
+
+ demo = gr.ChatInterface(
+     respond,
+     additional_inputs=[
+         gr.Dropdown([
+                 'Meta-Llama-3-70B-Instruct-Q3_K_M.gguf',
+                 'Mistral-7B-Instruct-v0.3-f32.gguf'
+             ],
+             value="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
+             label="Model"
+         ),
+         gr.Textbox(value="You are a helpful assistant.", label="System message"),
+         gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.95,
+             step=0.05,
+             label="Top-p",
+         ),
+         gr.Slider(
+             minimum=0,
+             maximum=100,
+             value=40,
+             step=1,
+             label="Top-k",
+         ),
+         gr.Slider(
+             minimum=0.0,
+             maximum=2.0,
+             value=1.1,
+             step=0.1,
+             label="Repetition penalty",
+         ),
+     ],
+     theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet", neutral_hue="gray", font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]).set(
+         body_background_fill_dark="#16141c",
+         block_background_fill_dark="#16141c",
+         block_border_width="1px",
+         block_title_background_fill_dark="#1e1c26",
+         input_background_fill_dark="#292733",
+         button_secondary_background_fill_dark="#24212b",
+         border_color_accent_dark="#343140",
+         border_color_primary_dark="#343140",
+         background_fill_secondary_dark="#16141c",
+         color_accent_soft_dark="transparent",
+         code_background_fill_dark="#292733",
+     ),
+     css=css,
+     retry_btn="Retry",
+     undo_btn="Undo",
+     clear_btn="Clear",
+     submit_btn="Send",
+     description="Llama-cpp-agent: Chat multi llm selection",
+     chatbot=gr.Chatbot(
+         scale=1,
+         placeholder=PLACEHOLDER,
+         likeable=False,
+         show_copy_button=True
+     )
+ )
+
+ if __name__ == "__main__":
+     demo.launch()