from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
import giphy_client
from giphy_client.rest import ApiException
import random
from Gradio_UI import GradioUI
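
# GIPHY API key and client used by the get_gif tool below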
API_KEY = "TUIRcl9MzEBwR4p3eoF8LmMmwQQXXtPq"
api = giphy_client.DefaultApi()

@tool
def get_gif(query: str) -> str:
    """Fetch a single GIF URL from GIPHY based on a search query.

    Args:
        query (str): The search term (e.g. "funny cat", "excited", "monday mood").

    Returns:
        str: The URL of a randomly selected GIF that matches the query,
        or None if no results are found or an API error occurs.

    Example:
        >>> get_gif("funny dog")
        'https://media2.giphy.com/media/l0MYt5jPR6QX5pnqM/giphy.gif'
    """
    try:
        res = api.gifs_search_get(API_KEY, q=query, limit=10, rating="pg")
        if not res.data:
            return None
        gif = random.choice(res.data)
        return gif.images.original.url
    except ApiException as e:
        print("Giphy API error:", e)
        return None
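
# Optional quick check of the tool outside the agent (example query from the docstring):
#   print(get_gif("funny dog"))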

final_answer = FinalAnswerTool()

# If the agent does not answer, the model may be overloaded. Use another model, or the
# following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # this model may be overloaded at times
    custom_role_conversions=None,
)
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
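# Note: image_generation_tool becomes usable only if it is also added to the
# tools list passed to CodeAgent below.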
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[final_answer, get_gif],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

GradioUI(agent).launch()