import os

os.environ["KERAS_BACKEND"] = "tensorflow"  # Must be set before Keras is imported.

import gradio as gr
import keras
import keras_nlp

import utils

css = """
html, body {
    margin: 0;
    padding: 0;
    height: 100%;
    overflow: hidden;
}
body::before {
    content: '';
    position: fixed;
    top: 0;
    left: 0;
    width: 100vw;
    height: 100vh;
    background-image: url('https://stsci-opo.org/STScI-01J5E849R5W27ZZ2C3QAE9ET75.png');
    background-size: cover;
    background-repeat: no-repeat;
    opacity: 0.35;  /* Faint background image */
    background-position: center;
    z-index: -1;  /* Keep the background behind the text */
}
.gradio-container {
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh;  /* Vertically center the content */
}
"""

# keras.config.set_floatx("bfloat16")  # Uncomment to run at half precision.

gemma_lm = keras_nlp.models.CausalLM.from_preset("hf://sultan-hassan/CosmoGemma_2b_en")

# Chat helper that manages the conversation state (modified version of
# https://github.com/google-gemini/gemma-cookbook/blob/main/Gemma/Keras_Gemma_2_Quickstart_Chat.ipynb).
chat = utils.ChatState(gemma_lm)


def launch(message):
    # Uncomment for a single-turn QA system without chat history/memory:
    # template = "Instruction:\n{instruction}\n\nResponse:\n{response}"
    # prompt = template.format(
    #     instruction=message,
    #     response="",
    # )
    # out = gemma_lm.generate(prompt, max_length=1024)
    # ind = out.index("\n\nResponse:\n") + len("\n\nResponse:\n")
    # return out[ind:]

    # Route the message through the chat helper, turning the QA system into a chatbot.
    return chat.send_message(message)


iface = gr.Interface(
    launch,
    inputs="text",
    outputs="text",
    css=css,
    title="Hey, I am CosmoGemma 👋 I can answer cosmology questions from astro-ph.CO research articles. Try me :)",
    description=(
        "Gemma_2b_en fine-tuned on QA pairs (~3.5k) generated from Cosmology and "
        "Nongalactic Astrophysics articles (arXiv astro-ph.CO) from 2018-2022, and "
        "tested on QA pairs (~1k) generated from 2023 articles, scoring over 75% accuracy."
    ),
)
iface.launch()
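
# ---------------------------------------------------------------------------
# For reference: a minimal sketch of what the `utils.ChatState` helper might
# look like, adapted from the gemma-cookbook chat quickstart linked above.
# The actual `utils` module bundled with this app is a modified version and
# is not shown here, so every detail below (names, markers, generation
# settings) is an assumption, not the app's real implementation. Since
# `iface.launch()` blocks, this definition is never reached while serving.


class ChatState:
    """Accumulates multi-turn history and wraps each turn in Gemma's chat markers."""

    __START_TURN_USER__ = "<start_of_turn>user\n"
    __START_TURN_MODEL__ = "<start_of_turn>model\n"
    __END_TURN__ = "<end_of_turn>\n"

    def __init__(self, model, system=""):
        self.model = model
        self.system = system  # Optional preamble prepended to every prompt.
        self.history = []

    def add_to_history_as_user(self, message):
        self.history.append(self.__START_TURN_USER__ + message + self.__END_TURN__)

    def add_to_history_as_model(self, message):
        self.history.append(self.__START_TURN_MODEL__ + message)

    def get_full_prompt(self):
        # Concatenate all past turns, then open a fresh model turn to complete.
        prompt = "".join(self.history) + self.__START_TURN_MODEL__
        if self.system:
            prompt = self.system + "\n" + prompt
        return prompt

    def send_message(self, message):
        self.add_to_history_as_user(message)
        prompt = self.get_full_prompt()
        response = self.model.generate(prompt, max_length=1024)
        # KerasNLP's generate() returns prompt + completion; keep only the new text.
        result = response.replace(prompt, "")
        self.add_to_history_as_model(result)
        return result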