Hugging Face Spaces — status: Running on Zero
Commit: "Fix request header" (Browse files)
File changed: app.py
|
@@ -38,7 +38,8 @@ def process_request(url, question, press_name, pipe_name, compression_ratio):
 
     # Fetch the Wikipedia article
     try:
-        [removed line — not captured in this page extraction; presumably the original requests.get(url) call without a User-Agent header]
+        headers = {'User-Agent': 'kvpress/1.0 (https://github.com/NVIDIA/kvpress; [email protected]) requests/2.31.0'}
+        content = requests.get(url, headers=headers).content
     except requests.exceptions.RequestException as e:
         return f"Error fetching the Wikipedia article: {str(e)}", -1, -1
 
@@ -46,6 +47,8 @@ def process_request(url, question, press_name, pipe_name, compression_ratio):
     # Parse the Wikipedia HTML
     soup = BeautifulSoup(content, "html.parser")
     context = "".join([p.text for p in soup.find_all("p")]) + "\n\n"
+    if content == "\n\n":
+        return f"Error parsing the Wikipedia article", -1, -1
 
     # Initialize the press
     press = press_dict[press_name](compression_ratio)

NOTE(review): the added check at new line 50 compares `content` (the raw HTTP response body) against "\n\n", but the "\n\n" sentinel is what gets appended to `context` on the line above — this looks like a `content`/`context` typo in the upstream change; verify against the full app.py.