Update app.py
app.py CHANGED

@@ -29,10 +29,7 @@ notice = "<p><center>Acknowledgment: This demo is powered by GPU resources suppo
 
 # setup
 def setup():
-    device = ("cuda" if torch.cuda.is_available() else "cpu")
     img_pipe = DDPMPipeline.from_pretrained("yuntian-deng/latex2im_ss_finetunegptneo")
-    img_pipe.to(device)
-
     model_type = "EleutherAI/gpt-neo-125M"
     #encoder = AutoModel.from_pretrained(model_type).to(device)
     encoder = img_pipe.unet.text_encoder
@@ -47,6 +44,8 @@ def setup():
     eos_id = tokenizer.encode(tokenizer.eos_token)[0]
 @spaces.GPU
 def forward_encoder(latex):
+    device = ("cuda" if torch.cuda.is_available() else "cpu")
+    img_pipe.to(device)
     encoded = tokenizer(latex, return_tensors='pt', truncation=True, max_length=1024)
     input_ids = encoded['input_ids']
     input_ids = torch.cat((input_ids, torch.LongTensor([eos_id,]).unsqueeze(0)), dim=-1)
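
For context, this change defers CUDA device placement from setup() to the @spaces.GPU-decorated forward_encoder(). That matches the usual pattern for the Hugging Face `spaces` package: on ZeroGPU hardware the GPU is only attached while a decorated call is running, so calling .to("cuda") at import/setup time can fail or silently stay on CPU, while on a regular T4 Space the decorator is effectively a no-op. Below is a minimal sketch of that pattern, not the Space's actual code; the generate() function and its call arguments are illustrative, and the real app routes text-encoder outputs through the pipeline rather than calling it directly.

import spaces
import torch
from diffusers import DDPMPipeline

# Load the pipeline on CPU at startup; no .to("cuda") here, because outside a
# @spaces.GPU call the GPU may not be available (or attached at all on ZeroGPU).
pipe = DDPMPipeline.from_pretrained("yuntian-deng/latex2im_ss_finetunegptneo")

@spaces.GPU
def generate(num_inference_steps: int = 50):
    # Inside the decorated call the GPU (if any) is usable, so it is safe to
    # move the model and run inference now. The call below uses the standard
    # DDPMPipeline interface for illustration only.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe.to(device)
    return pipe(batch_size=1, num_inference_steps=num_inference_steps).images[0]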