Ahsen Khaliq committed
Commit · a87d0e8
1 Parent(s): fe3881c
usage
app.py
CHANGED
@@ -6,7 +6,7 @@ import math
 from pathlib import Path
 import sys
 sys.path.insert(1, './taming-transformers')
-
+from IPython import display
 from base64 import b64encode
 from omegaconf import OmegaConf
 from PIL import Image
@@ -24,8 +24,14 @@ import imageio
 from PIL import ImageFile, Image
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 import gradio as gr
-
+import nvidia_smi
+
+nvidia_smi.nvmlInit()
+handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)
+# card id 0 hardcoded here, there is also a call to get all available card ids, so we could iterate
 
+
+torch.hub.download_url_to_file('https://i.imgur.com/WEHmKef.jpg', 'gpu.jpg')
 def sinc(x):
 return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
 def lanczos(x, a):
@@ -290,8 +296,10 @@ def inference(text, seed, step_size, max_iterations, width, height):
 losses_str = ', '.join(f'{loss.item():g}' for loss in losses)
 tqdm.write(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')
 out = synth(z)
-
-
+TF.to_pil_image(out[0].cpu()).save('progress.png')
+display.display(display.Image('progress.png'))
+res = nvidia_smi.nvmlDeviceGetUtilizationRates(handle)
+print(f'gpu: {res.gpu}%, gpu-mem: {res.memory}%')
 def ascend_txt():
 # global i
 out = synth(z)
@@ -338,11 +346,6 @@ def load_image( infilename ) :
 data = np.asarray( img, dtype="int32" )
 return data
 def throttled_inference(text, seed, step_size, max_iterations, width, height):
-t = torch.cuda.get_device_properties(0).total_memory
-r = torch.cuda.memory_reserved(0)
-a = torch.cuda.memory_allocated(0)
-f = r-a # free inside reserved
-print(f)
 global inferences_running
 current = inferences_running
 if current >= 2:
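
Note on the change: the iteration loop now polls GPU utilisation through NVML instead of relying on the torch.cuda counters removed from throttled_inference. A minimal standalone sketch of that polling pattern, assuming the nvidia_smi bindings are installed (e.g. via the nvidia-ml-py3 package; the corresponding requirements change is not part of this diff):

import nvidia_smi

# One-time setup: initialise NVML and take a handle to GPU 0
# (card id 0 is hardcoded in the commit; as its comment notes, NVML can also enumerate all cards).
nvidia_smi.nvmlInit()
handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)

# Per-iteration query: utilisation rates come back as percentages.
res = nvidia_smi.nvmlDeviceGetUtilizationRates(handle)
print(f'gpu: {res.gpu}%, gpu-mem: {res.memory}%')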
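
The deleted lines in throttled_inference had been reporting memory from PyTorch's caching allocator, which only sees what PyTorch itself has reserved on the device. A short sketch of what that removed snippet computed:

import torch

t = torch.cuda.get_device_properties(0).total_memory  # total VRAM on device 0, in bytes
r = torch.cuda.memory_reserved(0)   # bytes reserved by PyTorch's caching allocator
a = torch.cuda.memory_allocated(0)  # bytes currently held by live tensors
f = r - a                           # free space inside the reserved pool
print(f)

Switching to NVML presumably trades this allocator-local view for device-wide utilisation figures.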
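
The diff shows only the top of throttled_inference. A minimal sketch of the counter-based throttle those lines imply; the name inferences_running, the parameter list, and the limit of 2 come from the diff, while the rest of the body is assumed for illustration:

inferences_running = 0

def throttled_inference(text, seed, step_size, max_iterations, width, height):
    global inferences_running
    current = inferences_running
    if current >= 2:
        # Assumed behaviour: refuse new requests while two inferences are already in flight.
        return None
    inferences_running += 1
    try:
        # inference() is the generation function defined earlier in app.py.
        return inference(text, seed, step_size, max_iterations, width, height)
    finally:
        inferences_running -= 1

A plain global counter like this is not thread-safe; a production version would likely guard it with a threading.Lock.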