Update app.py

app.py CHANGED
@@ -143,6 +143,57 @@ def generate_filename(prompt, file_type):
     safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
     return f"{safe_date_time}_{safe_prompt}.{file_type}"
 
+
+
+# 3. Stream Llama Response
+# @st.cache_resource
+def StreamLLMChatResponse(prompt):
+
+    # My Inference API Copy
+    API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud'  # Dr Llama
+    API_KEY = os.getenv('API_KEY')
+
+    try:
+        endpoint_url = API_URL
+        hf_token = API_KEY
+        client = InferenceClient(endpoint_url, token=hf_token)
+        gen_kwargs = dict(
+            max_new_tokens=512,
+            top_k=30,
+            top_p=0.9,
+            temperature=0.2,
+            repetition_penalty=1.02,
+            stop_sequences=["\nUser:", "<|endoftext|>", "</s>"],
+        )
+        stream = client.text_generation(prompt, stream=True, details=True, **gen_kwargs)
+        report = []
+        res_box = st.empty()
+        collected_chunks = []
+        collected_messages = []
+        allresults = ''
+        for r in stream:
+            if r.token.special:
+                continue
+            if r.token.text in gen_kwargs["stop_sequences"]:
+                break
+            collected_chunks.append(r.token.text)
+            chunk_message = r.token.text
+            collected_messages.append(chunk_message)
+            try:
+                report.append(r.token.text)
+                if len(r.token.text) > 0:
+                    result = "".join(report).strip()
+                    res_box.markdown(f'*{result}*')
+
+            except:
+                st.write('Stream llm issue')
+        SpeechSynthesis(result)
+        return result
+    except:
+        st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
+
+
+
 # Chat and Chat with files
 def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
     model = model_choice
@@ -263,7 +314,8 @@ def display_homunculus_parts():
             part_description = homunculus_parts_extended[part].split('(')[1].rstrip(')')
             prompt = f"Learn about the key features and functions of the {part} - {part_description}"
             if st.button(f"Explore {part}", key=part):
-                response = chat_with_model(prompt, part)
+                #response = chat_with_model(prompt, part) # GPT
+                response = StreamLLMChatResponse(prompt) # Llama
 
     with st.expander(f"Brain ({homunculus_parts['Brain']})", expanded=False):
         brain_parts = {
@@ -284,15 +336,18 @@ def display_homunculus_parts():
             # Formatting the prompt in markdown style for enhanced learning
             prompt = f"Create a markdown outline with emojis to explain the {part} and its role in the brain: {description}"
             if st.button(f"Explore {part} 🧠", key=part):
-                response = chat_with_model(prompt, part)
-
+                #response = chat_with_model(prompt, part)
+                response = StreamLLMChatResponse(prompt) # Llama
+
     # Displaying central body parts
     central_parts = ["Neck", "Chest", "Abdomen", "Pelvis"]
     for part in central_parts:
         with st.expander(f"{part} ({homunculus_parts_extended[part]})", expanded=False):
             prompt = f"Learn about the key features and functions of the {part} - {homunculus_parts_extended[part].split(' ')[-1]}"
             if st.button(f"Explore {part} 🧣", key=part):
-                response = chat_with_model(prompt, part)
+                #response = chat_with_model(prompt, part)
+                response = StreamLLMChatResponse(prompt) # Llama
+
 
     # Displaying symmetric body parts
     symmetric_parts = ["Shoulder", "Upper Arm", "Elbow", "Forearm", "Wrist", "Hand", "Hip", "Thigh", "Knee", "Shin", "Foot"]
@@ -302,12 +357,15 @@ def display_homunculus_parts():
             with st.expander(f"Left {part} ({homunculus_parts_extended[f'Left {part}']})", expanded=False):
                 prompt = f"Learn about the key features and functions of the Left {part} - {homunculus_parts_extended[f'Left {part}'].split(' ')[-1]}"
                 if st.button(f"Explore Left {part} 💪", key=f"Left {part}"):
-                    response = chat_with_model(prompt, f"Left {part}")
+                    #response = chat_with_model(prompt, f"Left {part}")
+                    response = StreamLLMChatResponse(prompt) # Llama
+
         with col2:
             with st.expander(f"Right {part} ({homunculus_parts_extended[f'Right {part}']})", expanded=False):
                 prompt = f"Learn about the key features and functions of the Right {part} - {homunculus_parts_extended[f'Right {part}'].split(' ')[-1]}"
                 if st.button(f"Explore Right {part} 💪", key=f"Right {part}"):
-                    response = chat_with_model(prompt, f"Right {part}")
+                    #response = chat_with_model(prompt, f"Right {part}")
+                    response = StreamLLMChatResponse(prompt) # Llama
 
 
 # Define function to add paper buttons and links
@@ -325,28 +383,36 @@ def add_paper_buttons_and_links():
             link_button_with_emoji("https://arxiv.org/abs/2310.08560", "MemGPT", "🧠💾 Memory OS")
             outline_memgpt = "Memory Hierarchy, Context Paging, Self-directed Memory Updates, Memory Editing, Memory Retrieval, Preprompt Instructions, Semantic Memory, Episodic Memory, Emotional Contextual Understanding"
             if st.button("Discuss MemGPT Features"):
-                chat_with_model("Discuss the key features of MemGPT: " + outline_memgpt, "MemGPT")
+                prompt = "Discuss the key features of MemGPT: " + outline_memgpt
+                #chat_with_model(prompt, "MemGPT")
+                response = StreamLLMChatResponse(prompt) # Llama
 
     with col2:
         with st.expander("AutoGen 🤖🔗", expanded=False):
             link_button_with_emoji("https://arxiv.org/abs/2308.08155", "AutoGen", "🤖🔗 Multi-Agent LLM")
             outline_autogen = "Cooperative Conversations, Combining Capabilities, Complex Task Solving, Divergent Thinking, Factuality, Highly Capable Agents, Generic Abstraction, Effective Implementation"
             if st.button("Explore AutoGen Multi-Agent LLM"):
-                chat_with_model("Explore the key features of AutoGen: " + outline_autogen, "AutoGen")
+                prompt = "Explore the key features of AutoGen: " + outline_autogen
+                #chat_with_model(prompt, "AutoGen")
+                response = StreamLLMChatResponse(prompt) # Llama
 
     with col3:
         with st.expander("Whisper 🔊🧑🚀", expanded=False):
             link_button_with_emoji("https://arxiv.org/abs/2212.04356", "Whisper", "🔊🧑🚀 Robust STT")
             outline_whisper = "Scaling, Deep Learning Approaches, Weak Supervision, Zero-shot Transfer Learning, Accuracy & Robustness, Pre-training Techniques, Broad Range of Environments, Combining Multiple Datasets"
             if st.button("Learn About Whisper STT"):
-                chat_with_model("Learn about the key features of Whisper: " + outline_whisper, "Whisper")
+                prompt = "Learn about the key features of Whisper: " + outline_whisper
+                #chat_with_model(prompt, "Whisper")
+                response = StreamLLMChatResponse(prompt) # Llama
 
     with col4:
         with st.expander("ChatDev 💬💻", expanded=False):
             link_button_with_emoji("https://arxiv.org/pdf/2307.07924.pdf", "ChatDev", "💬💻 Comm. Agents")
             outline_chatdev = "Effective Communication, Comprehensive Software Solutions, Diverse Social Identities, Tailored Codes, Environment Dependencies, User Manuals"
             if st.button("Deep Dive into ChatDev"):
-                chat_with_model("Deep dive into the features of ChatDev: " + outline_chatdev, "ChatDev")
+                prompt = "Deep dive into the features of ChatDev: " + outline_chatdev
+                #chat_with_model(prompt, "ChatDev")
+                response = StreamLLMChatResponse(prompt) # Llama
 
 add_paper_buttons_and_links()
 
@@ -381,7 +447,8 @@ def create_expanders_and_buttons(content):
     with st.expander(header, expanded=False):
         if st.button(f"Explore {header}"):
             expanded_outline = "Expand on the feature: " + detail
-            chat_with_model(expanded_outline, header)
+            #chat_with_model(expanded_outline, header)
+            response = StreamLLMChatResponse(expanded_outline) # Llama
 
 def extract_feature_and_detail(paragraph):
     # Use regex to find the header and detail in the paragraph
@@ -404,7 +471,10 @@ def transcribe_audio(file_path, model):
     response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
     if response.status_code == 200:
         st.write(response.json())
-        chatResponse = chat_with_model(response.json().get('text'), '')
+        prompt = response.json().get('text')
+        chatResponse = chat_with_model(prompt, '') # *************************************
+        response = StreamLLMChatResponse(prompt) # Llama
+
         transcript = response.json().get('text')
         #st.write('Responses:')
         #st.write(chatResponse)
@@ -634,7 +704,9 @@ def main():
                 else:
                     if st.button(f"Chat about Section {i+1}"):
                         st.write('Reasoning with your inputs...')
-                        response = chat_with_model(user_prompt, section, model_choice)
+                        #response = chat_with_model(user_prompt, section, model_choice)
+                        response = StreamLLMChatResponse(user_prompt + ' ' + section) # Llama
+
                         st.write('Response:')
                         st.write(response)
                         document_responses[i] = response
@@ -650,7 +722,8 @@ def main():
             full_response = ''
             for prompt_section in user_prompt_sections:
                 # Process each section with the model
-                response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
+                #response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
+                response = StreamLLMChatResponse(prompt_section + ''.join(list(document_sections))) # Llama
                 full_response += response + '\n' # Combine the responses
             response = full_response
             st.write('Response:')
@@ -711,7 +784,9 @@ def main():
         if next_action=='search':
             file_content_area = st.text_area("File Contents:", file_contents, height=500)
             st.write('Reasoning with your inputs...')
-            response = chat_with_model(user_prompt, file_contents, model_choice)
+            #response = chat_with_model(user_prompt, file_contents, model_choice)
+            response = StreamLLMChatResponse(user_prompt + ' ' + file_contents) # Llama
+
             filename = generate_filename(file_contents, choice)
             create_file(filename, user_prompt, response, should_save)
 
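For readers following the change outside the diff view: the new StreamLLMChatResponse path is built on huggingface_hub's InferenceClient streaming API. Below is a minimal standalone sketch of that pattern, reusing the endpoint URL, sampling parameters, and API_KEY environment variable from the commit; the prompt and the final print are illustrative only.

import os
from huggingface_hub import InferenceClient

# Endpoint and token source as in the commit; any text-generation endpoint works here.
client = InferenceClient(
    'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud',
    token=os.getenv('API_KEY'),
)

stop_sequences = ["\nUser:", "<|endoftext|>", "</s>"]
pieces = []
# stream=True with details=True yields one token-level response object at a time.
for r in client.text_generation(
    'Explain the sensory homunculus in one paragraph.',  # illustrative prompt
    stream=True,
    details=True,
    max_new_tokens=512,
    top_k=30,
    top_p=0.9,
    temperature=0.2,
    repetition_penalty=1.02,
    stop_sequences=stop_sequences,
):
    if r.token.special:                # skip control tokens such as </s>
        continue
    if r.token.text in stop_sequences:
        break
    pieces.append(r.token.text)        # accumulate tokens as they stream in

print(''.join(pieces).strip())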
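Inside the generation loop, the commit renders partial output by rewriting a single Streamlit placeholder. A sketch of just that display pattern, with a dummy token list standing in for the real stream:

import time
import streamlit as st

res_box = st.empty()     # a placeholder whose contents can be replaced in place
report = []
for token_text in ['Hello', ', ', 'world', '!']:       # stands in for streamed tokens
    report.append(token_text)
    res_box.markdown(f"*{''.join(report).strip()}*")   # redraw the text so far
    time.sleep(0.1)      # pacing only, so the incremental redraw is visible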
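Finally, the fallback branch points at a KEDA-scaled endpoint that idles down to zero, so the first call after a quiet period can fail while the container starts. One way a caller might wait out that cold start is sketched here; the wrapper name, attempt count, and delay are assumptions rather than part of the commit (StreamLLMChatResponse returns None when its except branch fires):

import time

def stream_with_retry(prompt, attempts=3, delay_seconds=300):
    # Hypothetical wrapper around the commit's StreamLLMChatResponse.
    for _ in range(attempts):
        result = StreamLLMChatResponse(prompt)
        if result is not None:     # the stream completed and returned text
            return result
        time.sleep(delay_seconds)  # give KEDA time to scale the endpoint up from zero
    return None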