File size: 2,337 Bytes
ab02957
71a012f
 
 
 
46b89af
e1500b4
f3e7ebc
c53942e
01e87d5
c53942e
 
 
01e87d5
ebf1864
01e87d5
 
 
 
 
 
 
c53942e
f3e7ebc
 
74ac718
f3e7ebc
01e87d5
e1500b4
c53942e
 
 
 
 
 
f3e7ebc
ca624af
46b89af
71a012f
46b89af
 
 
ab02957
46b89af
 
 
 
 
 
 
 
 
 
 
 
ab02957
71a012f
 
ca624af
c7598d9
e468ee0
9fc0643
ab02957
836f4b5
ab02957
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import gradio as gr
import os
# Must be set BEFORE importing keras: the backend is chosen at import time.
os.environ["KERAS_BACKEND"] = "tensorflow"                                                                           
import keras
import keras_nlp
import utils  # project-local chat-state helper (see ChatState below)

# Custom CSS injected into the Gradio page: a faint full-viewport background
# image (fixed, behind all content) and vertical centering of the interface.
css = """
html, body {
    margin: 0;
    padding: 0;
    height: 100%;
    overflow: hidden;
}

body::before {
    content: '';
    position: fixed;
    top: 0;
    left: 0;
    width: 100vw;
    height: 100vh;
    background-image: url('https://stsci-opo.org/STScI-01J5E849R5W27ZZ2C3QAE9ET75.png');
    background-size: cover;
    background-repeat: no-repeat;
    opacity: 0.35;             /* Faint background image */
    background-position: center;
    z-index: -1;    /* Keep the background behind text */
}
.gradio-container {
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh;  /* Ensure the content is vertically centered */
}
"""

#keras.config.set_floatx("bfloat16") uncomment to run at half precision.
# Download/load the fine-tuned Gemma 2B causal LM from the Hugging Face Hub.
# NOTE(review): this runs at import time and may take a while / hit the network.
gemma_lm = keras_nlp.models.CausalLM.from_preset("hf://sultan-hassan/CosmoGemma_2b_en")
# Module-level conversation state shared by every call to launch() — a single
# global chat session for the whole app.
chat = utils.ChatState(gemma_lm) # adding a chat helper to manage the conversation state (modified version from https://github.com/google-gemini/gemma-cookbook/blob/main/Gemma/Keras_Gemma_2_Quickstart_Chat.ipynb)

def launch(message):
    """Answer one user message through the stateful chat helper.

    Args:
        message: The user's question as a plain string (supplied by the
            Gradio text input).

    Returns:
        The model's reply as a string. Multi-turn history is kept by the
        module-level ``chat`` object, so successive calls share context.
    """
    # Earlier revisions built a one-shot "Instruction:/Response:" prompt and
    # called gemma_lm.generate() directly (stateless QA). ChatState now wraps
    # that flow and adds conversation memory.
    return chat.send_message(message)

# Wire the single-turn text-in/text-out UI around launch() and start the app.
# iface.launch() blocks and serves the Gradio web interface.
iface = gr.Interface(launch,
                     inputs="text",
                     outputs="text",
                     css=css,
                     title="Hey I am CosmoGemma 👋 I can answer cosmology questions from astroph.CO research articles. Try me :)",
                     description="Gemma_2b_en fine-tuned on QA pairs (~3.5k) generated from Cosmology and Nongalactic Astrophysics articles (arXiv astro-ph.CO) from 2018-2022 and tested on QA pairs (~1k) generated from 2023 articles, scoring over 75% accuracy.")

iface.launch()