Update app.py
app.py CHANGED
@@ -15,18 +15,29 @@ class ConversationPost(BaseModel):
     module: Union[str, None] = None
     question: str
 
+class InferencePost(BaseModel):
+    question: str
+    with_template: Union[str, None] = None
+
 
 API_TOKEN = os.environ['HF_API_KEY']
 
 os.environ["HUGGINGFACEHUB_API_TOKEN"] = API_TOKEN
 
 app = FastAPI()
-prompt = PromptTemplate.from_template("""<|im_start|>system
+prompt_qwen = PromptTemplate.from_template("""<|im_start|>system
 Kamu adalah Asisten AI yang dikembangkan oleh Jonthan Jordan. Answer strictly in Bahasa Indonesia<|im_end|>
 <|im_start|>user
 {question}<|im_end|>
 <|im_start|>assistant
 """)
+
+prompt_llama = PromptTemplate.from_template("""<|start_header_id|>system<|end_header_id|>
+
+Kamu adalah Asisten AI yang dikembangkan oleh Jonthan Jordan. Answer strictly in Bahasa Indonesia<|eot_id|><|start_header_id|>user<|end_header_id|>
+
+{question}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+""")
 # llm = prompt | HuggingFacePipeline.from_model_id(
 #     model_id="Qwen/Qwen2-1.5B-Instruct",
 #     task="text-generation",
@@ -36,20 +47,24 @@ Kamu adalah Asisten AI yang dikembangkan oleh Jonthan Jordan. Answer strictly in
 #     },
 # )
 
-llm = HuggingFaceEndpoint(
-    repo_id="
+llama = HuggingFaceEndpoint(
+    repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
     task="text-generation",
     max_new_tokens=150,
     do_sample=False,
 )
 
-llm2 = HuggingFaceEndpoint(
-    repo_id="
+qwen = HuggingFaceEndpoint(
+    repo_id="Qwen/Qwen1.5-4B-Chat",
     task="text-generation",
     max_new_tokens=150,
     do_sample=False,
 )
 
+llm = prompt_qwen | qwen
+
+llm2 = prompt_llama | llama
+
 # llm = prompt | CustomLLM(repo_id="Qwen/Qwen-VL-Chat", model_type='text-generation', api_token=API_TOKEN, max_new_tokens=150).bind(stop=['<|im_end|>'])
 
 
@@ -65,4 +80,15 @@ async def conversation(data : ConversationPost):
 
 @app.post("/conversation2")
 async def conversation2(data : ConversationPost):
-    return {"output":llm2.invoke({"question":data.question})}
+    return {"output":llm2.invoke({"question":data.question})}
+
+
+@app.post("/inference")
+async def inference(data : InferencePost):
+    if data.with_template == 'llama':
+        out = llm2.invoke({"question":data.question})
+    elif data.with_template == 'qwen':
+        out = llm.invoke({"question":data.question})
+    else:
+        out = llama.invoke(data.question)
+    return {"output":out}
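For reference, a minimal client sketch exercising the endpoints this commit touches. It assumes the app is served locally (for example with uvicorn app:app --port 8000) and that HF_API_KEY is set in the server's environment, which app.py requires; the host, port, and the requests dependency are illustrative assumptions, not part of the commit.

import requests

BASE = "http://localhost:8000"  # assumed local uvicorn address, not part of the commit

# /conversation2 runs the question through llm2 (Llama 3 chat template | endpoint).
r = requests.post(f"{BASE}/conversation2", json={"question": "Siapa kamu?"})
print(r.json()["output"])

# /inference routes on with_template: 'llama' -> llm2, 'qwen' -> llm,
# anything else -> the bare Meta-Llama-3-8B-Instruct endpoint.
for tmpl in ("llama", "qwen", None):
    r = requests.post(
        f"{BASE}/inference",
        json={"question": "Apa ibu kota Indonesia?", "with_template": tmpl},
    )
    print(tmpl, "->", r.json()["output"])

Note the design the commit settles on: each model gets its own chat template (ChatML markers for Qwen, header/eot markers for Llama 3), and the LangChain pipe operator composes PromptTemplate and HuggingFaceEndpoint into a chain, so the templated chains (llm, llm2) take a {"question": ...} mapping while the bare endpoint (llama) takes a plain string.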