| repo_name (string, 7–65 chars) | path (string, 5–185 chars) | copies (string, 1–4 chars) | size (string, 4–6 chars) | content (string, 977–990k chars) | license (string, 14 classes) | hash (string, 32 chars) | line_mean (float64, 7.18–99.4) | line_max (int64, 31–999) | alpha_frac (float64, 0.25–0.95) | ratio (float64, 1.5–7.84) | autogenerated (bool, 1 class) | config_or_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | has_few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| mozilla/normandy | contract-tests/v3_api/test_group_update.py | 1 | 1210 |
import uuid
from support.assertions import assert_valid_schema
from urllib.parse import urljoin
def test_group_update(conf, requests_session, headers):
# Create a new group
data = {"name": str(uuid.uuid4())}
response = requests_session.post(
urljoin(conf.getoption("server"), "/api/v3/group/"), headers=headers, data=data
)
assert response.status_code == 201
assert_valid_schema(response.json())
group_data = response.json()
group_id = group_data["id"]
# Verify group was stored and contains expected data
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
group_data = response.json()
assert response.status_code == 200
assert_valid_schema(response.json())
# Use the update to change the name
updated_data = {"name": str(uuid.uuid4())}
response = requests_session.put(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)),
headers=headers,
data=updated_data,
)
assert response.status_code == 200
assert_valid_schema(response.json())
assert response.json()["name"] == updated_data["name"]
| mpl-2.0 | 305dad0d6a7a428344d25835b0331542 | 33.571429 | 96 | 0.661983 | 3.74613 | false | false | false | false |
| mozilla/normandy | normandy/conftest.py | 1 | 3099 |
from django.core.management import call_command
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
import pytest
import requests_mock
from graphene.test import Client as GrapheneClient
from rest_framework.test import APIClient
from normandy.schema import schema as normandy_schema
from normandy.base.tests import UserFactory
from normandy.recipes import geolocation as geolocation_module
from normandy.recipes.tests import fake_sign
@pytest.fixture
def api_client():
"""Fixture to provide a DRF API client."""
user = UserFactory(is_superuser=True)
client = APIClient()
client.force_authenticate(user=user)
return client
@pytest.fixture
def gql_client():
"""Fixture to provide a Graphene client."""
client = GrapheneClient(normandy_schema)
return client
@pytest.fixture
def geolocation():
"""Fixture to load geolocation data."""
geolocation_module.load_geoip_database()
if geolocation_module.geoip_reader is None:
pytest.skip()
else:
return geolocation_module
@pytest.fixture
def mocked_autograph(mocker):
mocked = mocker.patch("normandy.recipes.models.Autographer")
mocked.return_value.sign_data.side_effect = fake_sign
return mocked
@pytest.fixture
def mocked_remotesettings(mocker):
return mocker.patch("normandy.recipes.models.RemoteSettings")
@pytest.fixture
def rs_settings(settings):
settings.REMOTE_SETTINGS_URL = "https://remotesettings.example.com/v1"
settings.REMOTE_SETTINGS_USERNAME = "normandy"
settings.REMOTE_SETTINGS_PASSWORD = "n0rm4ndy"
return settings
@pytest.fixture()
def migrations(transactional_db):
"""
This fixture returns a helper object to test Django data migrations.
Based on: https://gist.github.com/bennylope/82a6088c02fefdd47e18f3c04ec167af
"""
class Migrator(object):
def migrate(self, app, to):
migration = [(app, to)]
executor = MigrationExecutor(connection)
executor.migrate(migration)
return executor.loader.project_state(migration).apps
def reset(self):
call_command("migrate", no_input=True)
return Migrator()
@pytest.fixture
def requestsmock():
"""Return a context where requests are all mocked.
Usage::
def test_something(requestsmock):
requestsmock.get(
'https://example.com/path',
content=b'The content'
)
# Do stuff that involves requests.get('https://example.com/path')
"""
with requests_mock.mock() as m:
yield m
@pytest.fixture
def storage(settings):
settings.DEFAULT_FILE_STORAGE = "normandy.base.storage.NormandyInMemoryStorage"
from django.core.files.storage import default_storage
yield default_storage
dirs_to_delete = ["/"]
while len(dirs_to_delete) > 0:
dir_path = dirs_to_delete.pop()
paths, new_dirs = default_storage.listdir(dir_path)
dirs_to_delete.extend(new_dirs)
for path in paths:
default_storage.delete(path)
| mpl-2.0 | 6ff28baceb93bf94e973dfee7c503caa | 26.669643 | 83 | 0.695708 | 3.868914 | false | true | false | false |
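A quick illustration of the requestsmock fixture's documented usage (a sketch, not code from the repository; the URL is a placeholder). Any test module collected under the conftest above could do:

import requests

def test_outbound_request_is_mocked(requestsmock):
    # requests_mock intercepts all outbound HTTP inside the fixture's context,
    # so no real network traffic happens during the test.
    requestsmock.get("https://example.com/path", content=b"The content")
    assert requests.get("https://example.com/path").content == b"The content"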
| mozilla/normandy | normandy/recipes/migrations/0005_auto_20180503_2146.py | 1 | 2487 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-03 21:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [("recipes", "0004_auto_20180502_2340")]
operations = [
migrations.RemoveField(model_name="approvalrequest", name="revision"),
migrations.RemoveField(model_name="recipe", name="approved_revision"),
migrations.RemoveField(model_name="recipe", name="latest_revision"),
migrations.DeleteModel(name="RecipeRevision"),
migrations.RenameModel("TmpRecipeRevision", "RecipeRevision"),
migrations.AlterField(
model_name="reciperevision",
name="action",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="recipe_revisions",
to="recipes.Action",
),
),
migrations.AlterField(
model_name="reciperevision",
name="recipe",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="revisions",
to="recipes.Recipe",
),
),
migrations.AlterField(
model_name="reciperevision",
name="user",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="recipe_revisions",
to=settings.AUTH_USER_MODEL,
),
),
migrations.RenameField(
model_name="approvalrequest", old_name="tmp_revision", new_name="revision"
),
migrations.RenameField(
model_name="recipe", old_name="approved_tmp_revision", new_name="approved_revision"
),
migrations.RenameField(
model_name="recipe", old_name="latest_tmp_revision", new_name="latest_revision"
),
migrations.AlterField(
model_name="approvalrequest",
name="revision",
field=models.OneToOneField(
default=None,
on_delete=django.db.models.deletion.CASCADE,
related_name="approval_request",
to="recipes.RecipeRevision",
),
preserve_default=False,
),
]
| mpl-2.0 | 84226085f1856f60fffbc7d1658a6032 | 35.043478 | 95 | 0.577805 | 4.605556 | false | false | false | false |
| mozilla/normandy | contract-tests/v3_api/test_approval_request_close.py | 1 | 1496 |
from support.assertions import assert_valid_schema
from support.helpers import new_recipe
from urllib.parse import urljoin
def test_approval_request_close(conf, requests_session, headers):
# Get an action we can work with
action_response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/action/"), headers=headers
)
data = action_response.json()
action_id = data["results"][0]["id"]
# Create a recipe
recipe_details = new_recipe(requests_session, action_id, conf.getoption("server"), headers)
# Create an approval request
response = requests_session.post(
urljoin(
conf.getoption("server"),
"/api/v3/recipe_revision/{}/request_approval/".format(
recipe_details["latest_revision_id"]
),
),
headers=headers,
)
data = response.json()
approval_id = data["id"]
assert response.status_code != 404
assert_valid_schema(response.json())
# Close the approval request
response = requests_session.post(
urljoin(
conf.getoption("server"), "/api/v3/approval_request/{}/close/".format(approval_id)
),
headers=headers,
)
assert response.status_code == 204
# Verify that it no longer exists
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/approval_request/{}/".format(approval_id)),
headers=headers,
)
assert response.status_code == 404
| mpl-2.0 | b8e6270987f62beaf396fe2b9ce6e9d0 | 31.521739 | 95 | 0.637032 | 4.021505 | false | false | false | false |
| mozilla/normandy | normandy/recipes/migrations/0014_auto_20190228_1128.py | 1 | 2576 |
# Generated by Django 2.0.13 on 2019-02-28 11:28
import json
import sys
from urllib.parse import unquote_plus, urlparse
from django.db import migrations
def get_filename_from_url(url):
return unquote_plus(urlparse(url).path.split("/")[-1])
def add_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
Extension = apps.get_model("studies", "Extension")
failures = []
try:
action = Action.objects.get(name="opt-out-study")
except Action.DoesNotExist:
return # Do nothing since there cannot be any recipes using the opt-out-study action
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
url = arguments.get("addonUrl")
filename = get_filename_from_url(url)
try:
extension = Extension.objects.get(xpi=f"extensions/{filename}")
except Extension.DoesNotExist:
failures.append(
{
"filename": filename,
"addon_url": arguments.get("addonUrl"),
"revision_id": revision.id,
"recipe_id": revision.recipe.id,
}
)
else:
arguments["extensionApiId"] = extension.id
revision.arguments_json = json.dumps(arguments)
revision.save()
if failures:
for failure in failures:
sys.stderr.write(f"{failure}\n")
raise Exception("There were failures in this migration.")
def remove_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
try:
action = Action.objects.get(name="opt-out-study")
except Action.DoesNotExist:
return # Do nothing since there cannot be any recipes using the opt-out-study action
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
if "extensionApiId" in arguments:
arguments.pop("extensionApiId")
revision.arguments_json = json.dumps(arguments)
revision.save()
class Migration(migrations.Migration):
dependencies = [
("recipes", "0013_auto_20181018_2049"),
("studies", "0006_extension_hash_algorithm"),
]
operations = [migrations.RunPython(add_extension_id, remove_extension_id)]
| mpl-2.0 | 0a2d391270318e7b0286c7f6cc490d0f | 30.802469 | 93 | 0.634705 | 4.088889 | false | false | false | false |
| developmentseed/landsat-util | setup.py | 1 | 1158 |
#!/usr/bin/env python
# Landsat Util
# License: CC0 1.0 Universal
try:
from setuptools import setup
setup_kwargs = {'entry_points': {'console_scripts':['landsat=landsat.landsat:__main__']}}
except ImportError:
from distutils.core import setup
setup_kwargs = {'scripts': ['bin/landsat']}
from landsat import __version__
def readme():
with open('README.rst') as f:
return f.read()
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
with open('requirements-dev.txt') as fid:
TEST_REQUIRES = [l.strip() for l in fid.readlines() if l]
setup(
name='landsat-util',
version=__version__,
description='A utility to search, download and process Landsat 8' +
' satellite imagery',
long_description=readme(),
author='Development Seed',
author_email='[email protected]',
url='https://github.com/developmentseed/landsat-util',
packages=['landsat'],
include_package_data=True,
license='CC0',
platforms='Posix; MacOS X; Windows',
install_requires=INSTALL_REQUIRES,
tests_require=TEST_REQUIRES,
**setup_kwargs
)
| cc0-1.0 | 21e308a331aa8210e6c3216ad6ef8e17 | 25.930233 | 93 | 0.66753 | 3.436202 | false | true | false | false |
| developmentseed/landsat-util | landsat/mixins.py | 3 | 2950 |
# Pansharpened Image Process using Rasterio
# Landsat Util
# License: CC0 1.0 Universal
from __future__ import print_function, division, absolute_import
import sys
import subprocess
from termcolor import colored
class VerbosityMixin(object):
"""
Verbosity Mixin that generates beautiful stdout outputs.
"""
verbose = False
def output(self, value, normal=False, color=None, error=False,
arrow=False, indent=None):
""" Handles verbosity of this calls.
if priority is set to 1, the value is printed
if class instance verbose is True, the value is printed
:param value:
a string representing the message to be printed
:type value:
String
:param normal:
if set to true the message is always printed, otherwise it is only shown if verbosity is set
:type normal:
boolean
:param color:
The color of the message, choices: 'red', 'green', 'blue'
:type color:
String
:param error:
if set to true the message appears in red
:type error:
Boolean
:param arrow:
if set to true an arrow appears before the message
:type arrow:
Boolean
:param indent:
indents the message based on the number provided
:type indent:
Integer
:returns:
void
"""
if error and value and (normal or self.verbose):
return self._print(value, color='red', indent=indent)
if self.verbose or normal:
return self._print(value, color, arrow, indent)
return
def subprocess(self, argv):
"""
Execute subprocess commands with proper output.
This is no longer used in landsat-util
:param argv:
A list of subprocess arguments
:type argv:
List
:returns:
void
"""
if self.verbose:
proc = subprocess.Popen(argv, stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(argv, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.output(proc.stderr.read(), error=True)
return
def exit(self, message):
""" outputs an exit message and exits
:param message:
The message to be output
:type message:
String
:returns:
void
"""
self.output(message, normal=True, color="green")
sys.exit()
def _print(self, msg, color=None, arrow=False, indent=None):
""" Print the msg with the color provided. """
if color:
msg = colored(msg, color)
if arrow:
msg = colored('===> ', 'blue') + msg
if indent:
msg = (' ' * indent) + msg
print(msg)
return msg
| cc0-1.0 | a023114dc73545a6f35664a0aba26973 | 24.652174 | 104 | 0.554237 | 4.689984 | false | false | false | false |
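A small sketch (not from the repository) of how a class might mix in VerbosityMixin to use the output() helper described in the docstrings above; the Downloader class and its messages are hypothetical.

from landsat.mixins import VerbosityMixin

class Downloader(VerbosityMixin):
    # Hypothetical consumer of the mixin; only the documented output() API is used.
    def __init__(self, verbose=False):
        self.verbose = verbose

    def run(self):
        self.output("Starting download", normal=True, arrow=True)       # always printed, with a blue arrow
        self.output("Fetching scene metadata", color="blue", indent=2)  # printed only when verbose
        self.output("Checksum mismatch", error=True, normal=True)       # rendered in red

Downloader(verbose=True).run()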
| rmmh/skybot | core/irc.py | 3 | 10652 |
from __future__ import print_function
from builtins import map
from builtins import object
import re
import socket
import time
import _thread
import queue
from ssl import wrap_socket, CERT_NONE, CERT_REQUIRED, SSLError
DEFAULT_NAME = "skybot"
DEFAULT_REALNAME = "Python bot - http://github.com/rmmh/skybot"
DEFAULT_NICKSERV_NAME = "nickserv"
DEFAULT_NICKSERV_COMMAND = "IDENTIFY %s"
def decode(txt):
for codec in ("utf-8", "iso-8859-1", "shift_jis", "cp1252"):
try:
return txt.decode(codec)
except UnicodeDecodeError:
continue
return txt.decode("utf-8", "ignore")
def censor(text, censored_strings=None):
text = re.sub("[\n\r]+", " ", text)
if not censored_strings:
return text
words = map(re.escape, censored_strings)
pattern = "(%s)" % "|".join(words)
text = re.sub(pattern, "[censored]", text)
return text
class crlf_tcp(object):
"Handles tcp connections that consist of utf-8 lines ending with crlf"
def __init__(self, host, port, timeout=300):
self.ibuffer = b""
self.obuffer = b""
self.oqueue = queue.Queue() # lines to be sent out
self.iqueue = queue.Queue() # lines that were received
self.socket = self.create_socket()
self.host = host
self.port = port
self.timeout = timeout
def create_socket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def run(self):
while True:
try:
self.socket.connect((self.host, self.port))
except socket.timeout:
print("timed out connecting to %s:%s" % (self.host, self.port))
time.sleep(60)
else:
break
_thread.start_new_thread(self.recv_loop, ())
_thread.start_new_thread(self.send_loop, ())
def recv_from_socket(self, nbytes):
return self.socket.recv(nbytes)
def get_timeout_exception_type(self):
return socket.timeout
def handle_receive_exception(self, error, last_timestamp):
if time.time() - last_timestamp > self.timeout:
self.iqueue.put(StopIteration)
self.socket.close()
return True
return False
def recv_loop(self):
last_timestamp = time.time()
while True:
try:
data = self.recv_from_socket(4096)
self.ibuffer += data
if data:
last_timestamp = time.time()
else:
if time.time() - last_timestamp > self.timeout:
self.iqueue.put(StopIteration)
self.socket.close()
return
time.sleep(1)
except (self.get_timeout_exception_type(), socket.error) as e:
if self.handle_receive_exception(e, last_timestamp):
return
continue
while b"\r\n" in self.ibuffer:
line, self.ibuffer = self.ibuffer.split(b"\r\n", 1)
self.iqueue.put(decode(line))
def send_loop(self):
while True:
line = self.oqueue.get().splitlines()[0][:500]
print(">>> %s" % line)
self.obuffer += line.encode("utf-8", "replace") + b"\r\n"
while self.obuffer:
sent = self.socket.send(self.obuffer)
self.obuffer = self.obuffer[sent:]
class crlf_ssl_tcp(crlf_tcp):
"Handles ssl tcp connetions that consist of utf-8 lines ending with crlf"
def __init__(self, host, port, ignore_cert_errors, timeout=300):
self.ignore_cert_errors = ignore_cert_errors
crlf_tcp.__init__(self, host, port, timeout)
def create_socket(self):
return wrap_socket(
crlf_tcp.create_socket(self),
server_side=False,
cert_reqs=CERT_NONE if self.ignore_cert_errors else CERT_REQUIRED,
)
def recv_from_socket(self, nbytes):
return self.socket.read(nbytes)
def get_timeout_exception_type(self):
return SSLError
def handle_receive_exception(self, error, last_timestamp):
return crlf_tcp.handle_receive_exception(self, error, last_timestamp)
def zip_channels(channels):
channels.sort(key=lambda x: " " not in x) # keyed channels first
chans = []
keys = []
for channel in channels:
if " " in channel:
chan, key = channel.split(" ")
chans.append(chan)
keys.append(key)
else:
chans.append(channel)
chans = ",".join(chans)
if keys:
return [chans, ",".join(keys)]
else:
return [chans]
def test_zip_channels():
assert zip_channels(["#a", "#b c", "#d"]) == ["#b,#a,#d", "c"]
assert zip_channels(["#a", "#b"]) == ["#a,#b"]
class IRC(object):
IRC_PREFIX_REM = re.compile(r"(.*?) (.*?) (.*)").match
IRC_NOPROFEIX_REM = re.compile(r"()(.*?) (.*)").match
IRC_NETMASK_REM = re.compile(r":?([^!@]*)!?([^@]*)@?(.*)").match
IRC_PARAM_REF = re.compile(r"(?:^|(?<= ))(:.*|[^ ]+)").findall
"handles the IRC protocol"
# see the docs/ folder for more information on the protocol
def __init__(self, conf):
self.conn = None
self.nick = DEFAULT_NAME
self.user = DEFAULT_NAME
self.realname = DEFAULT_REALNAME
self.user_mode = None
self.server_host = None
self.server_port = 6667
self.server_password = None
self.nickserv_password = None
self.nickserv_name = DEFAULT_NICKSERV_NAME
self.nickserv_command = DEFAULT_NICKSERV_COMMAND
self.channels = []
self.admins = []
self.censored_strings = []
self.out = queue.Queue() # responses from the server are placed here
# format: [rawline, prefix, command, params,
# nick, user, host, paramlist, msg]
self.set_conf(conf)
self.connect()
_thread.start_new_thread(self.parse_loop, ())
def set_conf(self, conf):
self.nick = conf.get("nick", DEFAULT_NAME)
self.user = conf.get("user", DEFAULT_NAME)
self.realname = conf.get("realname", DEFAULT_REALNAME)
self.user_mode = conf.get("mode", None)
self.server_host = conf["server"]
self.server_port = conf.get("port", 6667)
self.server_password = conf.get("server_password", None)
self.nickserv_password = conf.get("nickserv_password", None)
self.nickserv_name = conf.get("nickserv_name", DEFAULT_NICKSERV_NAME)
self.nickserv_command = conf.get("nickserv_command", DEFAULT_NICKSERV_COMMAND)
self.channels = conf.get("channels", [])
self.admins = conf.get("admins", [])
self.censored_strings = conf.get("censored_strings", [])
if self.conn is not None:
self.join_channels()
def create_connection(self):
return crlf_tcp(self.server_host, self.server_port)
def connect(self):
self.conn = self.create_connection()
_thread.start_new_thread(self.conn.run, ())
self.cmd("NICK", [self.nick])
self.cmd("USER", [self.user, "3", "*", self.realname])
if self.server_password:
self.cmd("PASS", [self.server_password])
def parse_loop(self):
while True:
msg = self.conn.iqueue.get()
if msg == StopIteration:
self.connect()
continue
if msg.startswith(":"): # has a prefix
prefix, command, params = self.IRC_PREFIX_REM(msg).groups()
else:
prefix, command, params = self.IRC_NOPROFEIX_REM(msg).groups()
nick, user, host = self.IRC_NETMASK_REM(prefix).groups()
paramlist = self.IRC_PARAM_REF(params)
lastparam = ""
if paramlist:
if paramlist[-1].startswith(":"):
paramlist[-1] = paramlist[-1][1:]
lastparam = paramlist[-1]
self.out.put(
[msg, prefix, command, params, nick, user, host, paramlist, lastparam]
)
if command == "PING":
self.cmd("PONG", paramlist)
def join(self, channel):
self.cmd("JOIN", channel.split(" ")) # [chan, password]
def join_channels(self):
if self.channels:
# TODO: send multiple join commands for large channel lists
self.cmd("JOIN", zip_channels(self.channels))
def msg(self, target, text):
self.cmd("PRIVMSG", [target, text])
def cmd(self, command, params=None):
if params:
params[-1] = ":" + params[-1]
params = [censor(p, self.censored_strings) for p in params]
self.send(command + " " + " ".join(params))
else:
self.send(command)
def send(self, str):
self.conn.oqueue.put(str)
class FakeIRC(IRC):
def __init__(self, conf):
self.set_conf(conf)
self.out = queue.Queue() # responses from the server are placed here
self.f = open(fn, "rb")
_thread.start_new_thread(self.parse_loop, ())
def parse_loop(self):
while True:
msg = decode(self.f.readline()[9:])
if msg == "":
print("!!!!DONE READING FILE!!!!")
return
if msg.startswith(":"): # has a prefix
prefix, command, params = self.IRC_PREFIX_REM(msg).groups()
else:
prefix, command, params = self.IRC_NOPROFEIX_REM(msg).groups()
nick, user, host = self.IRC_NETMASK_REM(prefix).groups()
paramlist = self.IRC_PARAM_REF(params)
lastparam = ""
if paramlist:
if paramlist[-1].startswith(":"):
paramlist[-1] = paramlist[-1][1:]
lastparam = paramlist[-1]
self.out.put(
[msg, prefix, command, params, nick, user, host, paramlist, lastparam]
)
if command == "PING":
self.cmd("PONG", [params])
def cmd(self, command, params=None):
pass
class SSLIRC(IRC):
def __init__(self, conf):
super(SSLIRC, self).__init__(conf=conf)
self.server_port = 6697
self.server_ignore_cert = False
def set_conf(self, conf):
super(SSLIRC, self).set_conf(conf)
self.server_port = conf.get("port", 6697)
self.server_ignore_cert = conf.get("ignore_cert", False)
def create_connection(self):
return crlf_ssl_tcp(self.server_host, self.server_port, self.server_ignore_cert)
| unlicense | 8a5ef5e7c64db739c054b8d67d63de1f | 30.329412 | 88 | 0.556515 | 3.705043 | false | false | false | false |
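A rough sketch of constructing the bot connection above; the configuration keys mirror what set_conf() reads, the server and channel names are placeholders, and the import path assumes skybot's core/ directory is importable.

from core.irc import IRC

conf = {
    "nick": "examplebot",                          # optional, falls back to DEFAULT_NAME
    "server": "irc.example.net",                   # the only required key in set_conf()
    "port": 6667,
    "channels": ["#example", "#private hunter2"],  # "chan key" entries are zipped by zip_channels()
    "censored_strings": ["hunter2"],               # cmd() runs outgoing params through censor()
}

bot = IRC(conf)  # connects and starts parse_loop() in a background thread
# Parsed lines arrive on bot.out in the format documented in __init__:
rawline, prefix, command, params, nick, user, host, paramlist, msg = bot.out.get()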
| rmmh/skybot | plugins/util/http.py | 3 | 5942 |
from future.standard_library import hooks
from lxml import etree, html
import binascii
import collections
import hmac
import json
import random
import time
from hashlib import sha1
from builtins import str
from builtins import range
try:
from http.cookiejar import CookieJar
except ImportError:
from future.backports.http.cookiejar import CookieJar
with hooks():
import urllib.request, urllib.parse, urllib.error
from urllib.parse import (
quote,
unquote,
urlencode,
urlparse,
parse_qsl,
quote_plus as _quote_plus,
)
from urllib.error import HTTPError, URLError
ua_skybot = "Skybot/1.0 https://github.com/rmmh/skybot"
ua_firefox = (
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) "
"Gecko/20070725 Firefox/2.0.0.6"
)
ua_internetexplorer = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)"
def get_cookie_jar():
if not hasattr(get_cookie_jar, "memo"):
get_cookie_jar.memo = CookieJar()
return get_cookie_jar.memo
def clear_expired_cookies():
get_cookie_jar().clear_expired_cookies()
def get(*args, **kwargs):
return open(*args, **kwargs).read().decode("utf-8")
def get_html(*args, **kwargs):
return html.fromstring(open(*args, **kwargs).read())
def get_xml(*args, **kwargs):
return etree.fromstring(open(*args, **kwargs).read())
def get_json(*args, **kwargs):
return json.loads(open(*args, **kwargs).read())
def open(
url,
query_params=None,
post_data=None,
json_data=None,
get_method=None,
cookies=False,
oauth=False,
oauth_keys=None,
headers=None,
**kwargs
):
if query_params is None:
query_params = {}
query_params.update(kwargs)
url = prepare_url(url, query_params)
if post_data and isinstance(post_data, collections.Mapping):
post_data = urllib.parse.urlencode(post_data)
post_data = post_data.encode("UTF-8")
if json_data and isinstance(json_data, dict):
post_data = json.dumps(json_data).encode("utf-8")
request = urllib.request.Request(url, post_data)
if json_data:
request.add_header("Content-Type", "application/json")
if get_method is not None:
request.get_method = lambda: get_method
if headers is not None:
for header_key, header_value in headers.items():
request.add_header(header_key, header_value)
if "User-Agent" not in request.headers:
request.add_header("User-Agent", ua_skybot)
if oauth:
nonce = oauth_nonce()
timestamp = oauth_timestamp()
api_url, req_data = url.split("?")
unsigned_request = oauth_unsigned_request(
nonce, timestamp, req_data, oauth_keys["consumer"], oauth_keys["access"]
)
signature = oauth_sign_request(
"GET",
api_url,
req_data,
unsigned_request,
oauth_keys["consumer_secret"],
oauth_keys["access_secret"],
)
header = oauth_build_header(
nonce, signature, timestamp, oauth_keys["consumer"], oauth_keys["access"]
)
request.add_header("Authorization", header)
if cookies:
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(get_cookie_jar())
)
else:
opener = urllib.request.build_opener()
return opener.open(request)
def prepare_url(url, queries):
if queries:
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(str(url))
query = dict(urllib.parse.parse_qsl(query))
query.update(queries)
query = urllib.parse.urlencode(
dict((to_utf8(key), to_utf8(value)) for key, value in query.items())
)
url = urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
return url
def to_utf8(s):
if isinstance(s, str):
return s.encode("utf8", "ignore")
else:
return str(s)
def quote_plus(s):
return _quote_plus(to_utf8(s))
def oauth_nonce():
return "".join([str(random.randint(0, 9)) for i in range(8)])
def oauth_timestamp():
return str(int(time.time()))
def oauth_unsigned_request(nonce, timestamp, req, consumer, token):
d = {
"oauth_consumer_key": consumer,
"oauth_nonce": nonce,
"oauth_signature_method": "HMAC-SHA1",
"oauth_timestamp": timestamp,
"oauth_token": token,
"oauth_version": "1.0",
}
d.update(urllib.parse.parse_qsl(req))
request_items = d.items()
# TODO: Remove this when Python 2 is no longer supported.
# some of the fields are actual string and others are
# a wrapper of str for the python 3 migration.
# Convert them all so that they sort correctly.
request_items = [(str(k), str(v)) for k, v in request_items]
return quote(urllib.parse.urlencode(sorted(request_items, key=lambda key: key[0])))
def oauth_build_header(nonce, signature, timestamp, consumer, token):
d = {
"oauth_consumer_key": consumer,
"oauth_nonce": nonce,
"oauth_signature": signature,
"oauth_signature_method": "HMAC-SHA1",
"oauth_timestamp": timestamp,
"oauth_token": token,
"oauth_version": "1.0",
}
header = "OAuth "
for x in sorted(d, key=lambda key: key[0]):
header += x + '="' + d[x] + '", '
return header[:-1]
def oauth_sign_request(
method, url, params, unsigned_request, consumer_secret, token_secret
):
key = consumer_secret + "&" + token_secret
key = key.encode("utf-8", "replace")
base = method + "&" + quote(url, "") + "&" + unsigned_request
base = base.encode("utf-8", "replace")
hash = hmac.new(key, base, sha1)
signature = quote(binascii.b2a_base64(hash.digest())[:-1])
return signature
def unescape(s):
if not s.strip():
return s
return html.fromstring(s).text_content()
| unlicense | 8d528fae968f41036b202e03cfa36487 | 23.861925 | 87 | 0.619152 | 3.560216 | false | false | false | false |
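A brief sketch of calling the helpers above; the URLs are placeholders, and extra keyword arguments to get_json()/get_html() flow through open() into the query string via prepare_url().

from plugins.util import http  # assumes skybot's plugin directory is on the path

# Unrecognised keyword arguments are merged into the query string.
data = http.get_json("https://api.example.com/search", q="skybot", limit=5)

# get_html() returns an lxml tree, so XPath is available on the result.
page = http.get_html("https://example.com/", cookies=True)
titles = page.xpath("//title/text()")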
| pytube/pytube | pytube/query.py | 1 | 12622 |
"""This module provides a query interface for media streams and captions."""
from collections.abc import Mapping, Sequence
from typing import Callable, List, Optional, Union
from pytube import Caption, Stream
from pytube.helpers import deprecated
class StreamQuery(Sequence):
"""Interface for querying the available media streams."""
def __init__(self, fmt_streams):
"""Construct a :class:`StreamQuery <StreamQuery>`.
param list fmt_streams:
list of :class:`Stream <Stream>` instances.
"""
self.fmt_streams = fmt_streams
self.itag_index = {int(s.itag): s for s in fmt_streams}
def filter(
self,
fps=None,
res=None,
resolution=None,
mime_type=None,
type=None,
subtype=None,
file_extension=None,
abr=None,
bitrate=None,
video_codec=None,
audio_codec=None,
only_audio=None,
only_video=None,
progressive=None,
adaptive=None,
is_dash=None,
custom_filter_functions=None,
):
"""Apply the given filtering criterion.
:param fps:
(optional) The frames per second.
:type fps:
int or None
:param resolution:
(optional) Alias to ``res``.
:type resolution:
str or None
:param res:
(optional) The video resolution.
:type res:
str or None
:param mime_type:
(optional) Two-part identifier for file formats and format contents
composed of a "type", a "subtype".
:type mime_type:
str or None
:param type:
(optional) Type part of the ``mime_type`` (e.g.: audio, video).
:type type:
str or None
:param subtype:
(optional) Sub-type part of the ``mime_type`` (e.g.: mp4, mov).
:type subtype:
str or None
:param file_extension:
(optional) Alias to ``sub_type``.
:type file_extension:
str or None
:param abr:
(optional) Average bitrate (ABR) refers to the average amount of
data transferred per unit of time (e.g.: 64kbps, 192kbps).
:type abr:
str or None
:param bitrate:
(optional) Alias to ``abr``.
:type bitrate:
str or None
:param video_codec:
(optional) Video compression format.
:type video_codec:
str or None
:param audio_codec:
(optional) Audio compression format.
:type audio_codec:
str or None
:param bool progressive:
Excludes adaptive streams (one file contains both audio and video
tracks).
:param bool adaptive:
Excludes progressive streams (audio and video are on separate
tracks).
:param bool is_dash:
Include/exclude dash streams.
:param bool only_audio:
Excludes streams with video tracks.
:param bool only_video:
Excludes streams with audio tracks.
:param custom_filter_functions:
(optional) Interface for defining complex filters without
subclassing.
:type custom_filter_functions:
list or None
"""
filters = []
if res or resolution:
filters.append(lambda s: s.resolution == (res or resolution))
if fps:
filters.append(lambda s: s.fps == fps)
if mime_type:
filters.append(lambda s: s.mime_type == mime_type)
if type:
filters.append(lambda s: s.type == type)
if subtype or file_extension:
filters.append(lambda s: s.subtype == (subtype or file_extension))
if abr or bitrate:
filters.append(lambda s: s.abr == (abr or bitrate))
if video_codec:
filters.append(lambda s: s.video_codec == video_codec)
if audio_codec:
filters.append(lambda s: s.audio_codec == audio_codec)
if only_audio:
filters.append(
lambda s: (
s.includes_audio_track and not s.includes_video_track
),
)
if only_video:
filters.append(
lambda s: (
s.includes_video_track and not s.includes_audio_track
),
)
if progressive:
filters.append(lambda s: s.is_progressive)
if adaptive:
filters.append(lambda s: s.is_adaptive)
if custom_filter_functions:
filters.extend(custom_filter_functions)
if is_dash is not None:
filters.append(lambda s: s.is_dash == is_dash)
return self._filter(filters)
def _filter(self, filters: List[Callable]) -> "StreamQuery":
fmt_streams = self.fmt_streams
for filter_lambda in filters:
fmt_streams = filter(filter_lambda, fmt_streams)
return StreamQuery(list(fmt_streams))
def order_by(self, attribute_name: str) -> "StreamQuery":
"""Apply a sort order. Filters out stream the do not have the attribute.
:param str attribute_name:
The name of the attribute to sort by.
"""
has_attribute = [
s
for s in self.fmt_streams
if getattr(s, attribute_name) is not None
]
# Check that the attributes have string values.
if has_attribute and isinstance(
getattr(has_attribute[0], attribute_name), str
):
# Try to return a StreamQuery sorted by the integer representations
# of the values.
try:
return StreamQuery(
sorted(
has_attribute,
key=lambda s: int(
"".join(
filter(str.isdigit, getattr(s, attribute_name))
)
), # type: ignore # noqa: E501
)
)
except ValueError:
pass
return StreamQuery(
sorted(has_attribute, key=lambda s: getattr(s, attribute_name))
)
def desc(self) -> "StreamQuery":
"""Sort streams in descending order.
:rtype: :class:`StreamQuery <StreamQuery>`
"""
return StreamQuery(self.fmt_streams[::-1])
def asc(self) -> "StreamQuery":
"""Sort streams in ascending order.
:rtype: :class:`StreamQuery <StreamQuery>`
"""
return self
def get_by_itag(self, itag: int) -> Optional[Stream]:
"""Get the corresponding :class:`Stream <Stream>` for a given itag.
:param int itag:
YouTube format identifier code.
:rtype: :class:`Stream <Stream>` or None
:returns:
The :class:`Stream <Stream>` matching the given itag or None if
not found.
"""
return self.itag_index.get(int(itag))
def get_by_resolution(self, resolution: str) -> Optional[Stream]:
"""Get the corresponding :class:`Stream <Stream>` for a given resolution.
Stream must be a progressive mp4.
:param str resolution:
Video resolution i.e. "720p", "480p", "360p", "240p", "144p"
:rtype: :class:`Stream <Stream>` or None
:returns:
The :class:`Stream <Stream>` matching the given itag or None if
not found.
"""
return self.filter(
progressive=True, subtype="mp4", resolution=resolution
).first()
def get_lowest_resolution(self) -> Optional[Stream]:
"""Get lowest resolution stream that is a progressive mp4.
:rtype: :class:`Stream <Stream>` or None
:returns:
The :class:`Stream <Stream>` matching the given itag or None if
not found.
"""
return (
self.filter(progressive=True, subtype="mp4")
.order_by("resolution")
.first()
)
def get_highest_resolution(self) -> Optional[Stream]:
"""Get highest resolution stream that is a progressive video.
:rtype: :class:`Stream <Stream>` or None
:returns:
The :class:`Stream <Stream>` matching the given itag or None if
not found.
"""
return self.filter(progressive=True).order_by("resolution").last()
def get_audio_only(self, subtype: str = "mp4") -> Optional[Stream]:
"""Get highest bitrate audio stream for given codec (defaults to mp4)
:param str subtype:
Audio subtype, defaults to mp4
:rtype: :class:`Stream <Stream>` or None
:returns:
The :class:`Stream <Stream>` matching the given itag or None if
not found.
"""
return (
self.filter(only_audio=True, subtype=subtype)
.order_by("abr")
.last()
)
def otf(self, is_otf: bool = False) -> "StreamQuery":
"""Filter stream by OTF, useful if some streams have 404 URLs
:param bool is_otf: Set to False to retrieve only non-OTF streams
:rtype: :class:`StreamQuery <StreamQuery>`
:returns: A StreamQuery object with otf filtered streams
"""
return self._filter([lambda s: s.is_otf == is_otf])
def first(self) -> Optional[Stream]:
"""Get the first :class:`Stream <Stream>` in the results.
:rtype: :class:`Stream <Stream>` or None
:returns:
the first result of this query or None if the result doesn't
contain any streams.
"""
try:
return self.fmt_streams[0]
except IndexError:
return None
def last(self):
"""Get the last :class:`Stream <Stream>` in the results.
:rtype: :class:`Stream <Stream>` or None
:returns:
Return the last result of this query or None if the result
doesn't contain any streams.
"""
try:
return self.fmt_streams[-1]
except IndexError:
pass
@deprecated("Get the size of this list directly using len()")
def count(self, value: Optional[str] = None) -> int: # pragma: no cover
"""Get the count of items in the list.
:rtype: int
"""
if value:
return self.fmt_streams.count(value)
return len(self)
@deprecated("This object can be treated as a list, all() is useless")
def all(self) -> List[Stream]: # pragma: no cover
"""Get all the results represented by this query as a list.
:rtype: list
"""
return self.fmt_streams
def __getitem__(self, i: Union[slice, int]):
return self.fmt_streams[i]
def __len__(self) -> int:
return len(self.fmt_streams)
def __repr__(self) -> str:
return f"{self.fmt_streams}"
class CaptionQuery(Mapping):
"""Interface for querying the available captions."""
def __init__(self, captions: List[Caption]):
"""Construct a :class:`Caption <Caption>`.
param list captions:
list of :class:`Caption <Caption>` instances.
"""
self.lang_code_index = {c.code: c for c in captions}
@deprecated(
"This object can be treated as a dictionary, i.e. captions['en']"
)
def get_by_language_code(
self, lang_code: str
) -> Optional[Caption]: # pragma: no cover
"""Get the :class:`Caption <Caption>` for a given ``lang_code``.
:param str lang_code:
The code that identifies the caption language.
:rtype: :class:`Caption <Caption>` or None
:returns:
The :class:`Caption <Caption>` matching the given ``lang_code`` or
None if it does not exist.
"""
return self.lang_code_index.get(lang_code)
@deprecated("This object can be treated as a dictionary")
def all(self) -> List[Caption]: # pragma: no cover
"""Get all the results represented by this query as a list.
:rtype: list
"""
return list(self.lang_code_index.values())
def __getitem__(self, i: str):
return self.lang_code_index[i]
def __len__(self) -> int:
return len(self.lang_code_index)
def __iter__(self):
return iter(self.lang_code_index.values())
def __repr__(self) -> str:
return f"{self.lang_code_index}"
| unlicense | 4cd3b1805a5fb6ff5a344f4ebe671490 | 28.980998 | 81 | 0.552131 | 4.29612 | false | false | false | false |
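A short sketch of the fluent interface documented in filter() and order_by() above; yt stands in for a pytube.YouTube object whose streams attribute is a StreamQuery, and the video URL is a placeholder.

from pytube import YouTube

yt = YouTube("https://www.youtube.com/watch?v=2lAe1cqCOXo")  # placeholder URL

# Progressive mp4 streams, sorted by resolution, highest first.
best = (
    yt.streams
    .filter(progressive=True, file_extension="mp4")
    .order_by("resolution")
    .desc()
    .first()
)

audio = yt.streams.get_audio_only()    # highest-bitrate mp4 audio stream
stream_22 = yt.streams.get_by_itag(22)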
| pytube/pytube | pytube/innertube.py | 1 | 11658 |
"""This module is designed to interact with the innertube API.
This module is NOT intended to be used directly by end users, as each of the
interfaces returns raw results. These should instead be parsed to extract
the useful information for the end user.
"""
# Native python imports
import json
import os
import pathlib
import time
from urllib import parse
# Local imports
from pytube import request
# YouTube on TV client secrets
_client_id = '861556708454-d6dlm3lh05idd8npek18k6be8ba3oc68.apps.googleusercontent.com'
_client_secret = 'SboVhoG9s0rNafixCSGGKXAT'
# Extracted API keys -- unclear what these are linked to.
_api_keys = [
'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'AIzaSyCtkvNIR1HCEwzsqK6JuE6KqpyjusIRI30',
'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
'AIzaSyC8UYZpvA2eknNex0Pjid0_eTLJoDu6los',
'AIzaSyCjc_pVEDi4qsv5MtC2dMXzpIaDoRFLsxw',
'AIzaSyDHQ9ipnphqTzDqZsbtd8_Ru4_kiKVQe2k'
]
_default_clients = {
'WEB': {
'context': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20200720.00.02'
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
},
'ANDROID': {
'context': {
'client': {
'clientName': 'ANDROID',
'clientVersion': '16.20'
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
},
'WEB_EMBED': {
'context': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20210721.00.00',
'clientScreen': 'EMBED'
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
},
'ANDROID_EMBED': {
'context': {
'client': {
'clientName': 'ANDROID',
'clientVersion': '16.20',
'clientScreen': 'EMBED'
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
}
}
_token_timeout = 1800
_cache_dir = pathlib.Path(__file__).parent.resolve() / '__cache__'
_token_file = os.path.join(_cache_dir, 'tokens.json')
class InnerTube:
"""Object for interacting with the innertube API."""
def __init__(self, client='ANDROID', use_oauth=False, allow_cache=True):
"""Initialize an InnerTube object.
:param str client:
Client to use for the object.
Defaults to 'ANDROID' (see _default_clients above for the available client names).
:param bool use_oauth:
Whether or not to authenticate to YouTube.
:param bool allow_cache:
Allows caching of oauth tokens on the machine.
"""
self.context = _default_clients[client]['context']
self.api_key = _default_clients[client]['api_key']
self.access_token = None
self.refresh_token = None
self.use_oauth = use_oauth
self.allow_cache = allow_cache
# Stored as epoch time
self.expires = None
# Try to load from file if specified
if self.use_oauth and self.allow_cache:
# Try to load from file if possible
if os.path.exists(_token_file):
with open(_token_file) as f:
data = json.load(f)
self.access_token = data['access_token']
self.refresh_token = data['refresh_token']
self.expires = data['expires']
self.refresh_bearer_token()
def cache_tokens(self):
"""Cache tokens to file if allowed."""
if not self.allow_cache:
return
data = {
'access_token': self.access_token,
'refresh_token': self.refresh_token,
'expires': self.expires
}
if not os.path.exists(_cache_dir):
os.mkdir(_cache_dir)
with open(_token_file, 'w') as f:
json.dump(data, f)
def refresh_bearer_token(self, force=False):
"""Refreshes the OAuth token if necessary.
:param bool force:
Force-refresh the bearer token.
"""
if not self.use_oauth:
return
# Skip refresh if it's not necessary and not forced
if self.expires > time.time() and not force:
return
# Subtracting 30 seconds is arbitrary to avoid potential time discrepancies
start_time = int(time.time() - 30)
data = {
'client_id': _client_id,
'client_secret': _client_secret,
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
response = request._execute_request(
'https://oauth2.googleapis.com/token',
'POST',
headers={
'Content-Type': 'application/json'
},
data=data
)
response_data = json.loads(response.read())
self.access_token = response_data['access_token']
self.expires = start_time + response_data['expires_in']
self.cache_tokens()
def fetch_bearer_token(self):
"""Fetch an OAuth token."""
# Subtracting 30 seconds is arbitrary to avoid potential time discrepancies
start_time = int(time.time() - 30)
data = {
'client_id': _client_id,
'scope': 'https://www.googleapis.com/auth/youtube'
}
response = request._execute_request(
'https://oauth2.googleapis.com/device/code',
'POST',
headers={
'Content-Type': 'application/json'
},
data=data
)
response_data = json.loads(response.read())
verification_url = response_data['verification_url']
user_code = response_data['user_code']
print(f'Please open {verification_url} and input code {user_code}')
input('Press enter when you have completed this step.')
data = {
'client_id': _client_id,
'client_secret': _client_secret,
'device_code': response_data['device_code'],
'grant_type': 'urn:ietf:params:oauth:grant-type:device_code'
}
response = request._execute_request(
'https://oauth2.googleapis.com/token',
'POST',
headers={
'Content-Type': 'application/json'
},
data=data
)
response_data = json.loads(response.read())
self.access_token = response_data['access_token']
self.refresh_token = response_data['refresh_token']
self.expires = start_time + response_data['expires_in']
self.cache_tokens()
@property
def base_url(self):
"""Return the base url endpoint for the innertube API."""
return 'https://www.youtube.com/youtubei/v1'
@property
def base_data(self):
"""Return the base json data to transmit to the innertube API."""
return {
'context': self.context
}
@property
def base_params(self):
"""Return the base query parameters to transmit to the innertube API."""
return {
'key': self.api_key,
'contentCheckOk': True,
'racyCheckOk': True
}
def _call_api(self, endpoint, query, data):
"""Make a request to a given endpoint with the provided query parameters and data."""
# Remove the API key if oauth is being used.
if self.use_oauth:
del query['key']
endpoint_url = f'{endpoint}?{parse.urlencode(query)}'
headers = {
'Content-Type': 'application/json',
}
# Add the bearer token if applicable
if self.use_oauth:
if self.access_token:
self.refresh_bearer_token()
headers['Authorization'] = f'Bearer {self.access_token}'
else:
self.fetch_bearer_token()
headers['Authorization'] = f'Bearer {self.access_token}'
response = request._execute_request(
endpoint_url,
'POST',
headers=headers,
data=data
)
return json.loads(response.read())
def browse(self):
"""Make a request to the browse endpoint.
TODO: Figure out how we can use this
"""
# endpoint = f'{self.base_url}/browse' # noqa:E800
...
# return self._call_api(endpoint, query, self.base_data) # noqa:E800
def config(self):
"""Make a request to the config endpoint.
TODO: Figure out how we can use this
"""
# endpoint = f'{self.base_url}/config' # noqa:E800
...
# return self._call_api(endpoint, query, self.base_data) # noqa:E800
def guide(self):
"""Make a request to the guide endpoint.
TODO: Figure out how we can use this
"""
# endpoint = f'{self.base_url}/guide' # noqa:E800
...
# return self._call_api(endpoint, query, self.base_data) # noqa:E800
def next(self):
"""Make a request to the next endpoint.
TODO: Figure out how we can use this
"""
# endpoint = f'{self.base_url}/next' # noqa:E800
...
# return self._call_api(endpoint, query, self.base_data) # noqa:E800
def player(self, video_id):
"""Make a request to the player endpoint.
:param str video_id:
The video id to get player info for.
:rtype: dict
:returns:
Raw player info results.
"""
endpoint = f'{self.base_url}/player'
query = {
'videoId': video_id,
}
query.update(self.base_params)
return self._call_api(endpoint, query, self.base_data)
def search(self, search_query, continuation=None):
"""Make a request to the search endpoint.
:param str search_query:
The query to search.
:rtype: dict
:returns:
Raw search query results.
"""
endpoint = f'{self.base_url}/search'
query = {
'query': search_query
}
query.update(self.base_params)
data = {}
if continuation:
data['continuation'] = continuation
data.update(self.base_data)
return self._call_api(endpoint, query, data)
def verify_age(self, video_id):
"""Make a request to the age_verify endpoint.
Notable examples of the types of video this verification step is for:
* https://www.youtube.com/watch?v=QLdAhwSBZ3w
* https://www.youtube.com/watch?v=hc0ZDaAZQT0
:param str video_id:
The video id to get player info for.
:rtype: dict
:returns:
Returns information that includes a URL for bypassing certain restrictions.
"""
endpoint = f'{self.base_url}/verify_age'
data = {
'nextEndpoint': {
'urlEndpoint': {
'url': f'/watch?v={video_id}'
}
},
'setControvercy': True
}
data.update(self.base_data)
result = self._call_api(endpoint, self.base_params, data)
return result
def get_transcript(self, video_id):
"""Make a request to the get_transcript endpoint.
This is likely related to captioning for videos, but is currently untested.
"""
endpoint = f'{self.base_url}/get_transcript'
query = {
'videoId': video_id,
}
query.update(self.base_params)
result = self._call_api(endpoint, query, self.base_data)
return result
| unlicense | ba0aec650f36c17aa80c51e0edb576d3 | 31.473538 | 93 | 0.556613 | 3.81979 | false | false | false | false |
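A minimal sketch of calling the client above directly; as the module docstring says, these endpoints return raw JSON that still needs parsing. The video id and search query are placeholders.

from pytube.innertube import InnerTube

it = InnerTube(client="WEB")            # any key of _default_clients: WEB, ANDROID, WEB_EMBED, ANDROID_EMBED
player_info = it.player("2lAe1cqCOXo")  # raw /player response as a dict
results = it.search("python tutorial")  # raw /search response as a dict

# Playable videos typically carry their stream descriptions under streamingData;
# guard with .get() since the exact response shape is not guaranteed.
formats = player_info.get("streamingData", {}).get("formats", [])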
| pytube/pytube | pytube/contrib/playlist.py | 1 | 14204 |
"""Module to download a complete playlist from a youtube channel."""
import json
import logging
from collections.abc import Sequence
from datetime import date, datetime
from typing import Dict, Iterable, List, Optional, Tuple, Union
from pytube import extract, request, YouTube
from pytube.helpers import cache, DeferredGeneratorList, install_proxy, uniqueify
logger = logging.getLogger(__name__)
class Playlist(Sequence):
"""Load a YouTube playlist with URL"""
def __init__(self, url: str, proxies: Optional[Dict[str, str]] = None):
if proxies:
install_proxy(proxies)
self._input_url = url
# These need to be initialized as None for the properties.
self._html = None
self._ytcfg = None
self._initial_data = None
self._sidebar_info = None
self._playlist_id = None
@property
def playlist_id(self):
"""Get the playlist id.
:rtype: str
"""
if self._playlist_id:
return self._playlist_id
self._playlist_id = extract.playlist_id(self._input_url)
return self._playlist_id
@property
def playlist_url(self):
"""Get the base playlist url.
:rtype: str
"""
return f"https://www.youtube.com/playlist?list={self.playlist_id}"
@property
def html(self):
"""Get the playlist page html.
:rtype: str
"""
if self._html:
return self._html
self._html = request.get(self.playlist_url)
return self._html
@property
def ytcfg(self):
"""Extract the ytcfg from the playlist page html.
:rtype: dict
"""
if self._ytcfg:
return self._ytcfg
self._ytcfg = extract.get_ytcfg(self.html)
return self._ytcfg
@property
def initial_data(self):
"""Extract the initial data from the playlist page html.
:rtype: dict
"""
if self._initial_data:
return self._initial_data
else:
self._initial_data = extract.initial_data(self.html)
return self._initial_data
@property
def sidebar_info(self):
"""Extract the sidebar info from the playlist page html.
:rtype: dict
"""
if self._sidebar_info:
return self._sidebar_info
else:
self._sidebar_info = self.initial_data['sidebar'][
'playlistSidebarRenderer']['items']
return self._sidebar_info
@property
def yt_api_key(self):
"""Extract the INNERTUBE_API_KEY from the playlist ytcfg.
:rtype: str
"""
return self.ytcfg['INNERTUBE_API_KEY']
def _paginate(
self, until_watch_id: Optional[str] = None
) -> Iterable[List[str]]:
"""Parse the video links from the page source, yields the /watch?v=
part from video link
:param until_watch_id Optional[str]: YouTube Video watch id until
which the playlist should be read.
:rtype: Iterable[List[str]]
:returns: Iterable of lists of YouTube watch ids
"""
videos_urls, continuation = self._extract_videos(
json.dumps(extract.initial_data(self.html))
)
if until_watch_id:
try:
trim_index = videos_urls.index(f"/watch?v={until_watch_id}")
yield videos_urls[:trim_index]
return
except ValueError:
pass
yield videos_urls
# Extraction from a playlist only returns 100 videos at a time
# if self._extract_videos returns a continuation there are more
# than 100 songs inside a playlist, so we need to add further requests
# to gather all of them
if continuation:
load_more_url, headers, data = self._build_continuation_url(continuation)
else:
load_more_url, headers, data = None, None, None
while load_more_url and headers and data: # there is an url found
logger.debug("load more url: %s", load_more_url)
# requesting the next page of videos with the url generated from the
# previous page, needs to be a post
req = request.post(load_more_url, extra_headers=headers, data=data)
# extract up to 100 songs from the page loaded
# returns another continuation if more videos are available
videos_urls, continuation = self._extract_videos(req)
if until_watch_id:
try:
trim_index = videos_urls.index(f"/watch?v={until_watch_id}")
yield videos_urls[:trim_index]
return
except ValueError:
pass
yield videos_urls
if continuation:
load_more_url, headers, data = self._build_continuation_url(
continuation
)
else:
load_more_url, headers, data = None, None, None
def _build_continuation_url(self, continuation: str) -> Tuple[str, dict, dict]:
"""Helper method to build the url and headers required to request
the next page of videos
:param str continuation: Continuation extracted from the json response
of the last page
:rtype: Tuple[str, dict, dict]
:returns: Tuple of an url and required headers for the next http
request
"""
return (
(
# was changed to this format (and post requests)
# between 2021.03.02 and 2021.03.03
"https://www.youtube.com/youtubei/v1/browse?key="
f"{self.yt_api_key}"
),
{
"X-YouTube-Client-Name": "1",
"X-YouTube-Client-Version": "2.20200720.00.02",
},
# extra data required for post request
{
"continuation": continuation,
"context": {
"client": {
"clientName": "WEB",
"clientVersion": "2.20200720.00.02"
}
}
}
)
@staticmethod
def _extract_videos(raw_json: str) -> Tuple[List[str], Optional[str]]:
"""Extracts videos from a raw json page
:param str raw_json: Input json extracted from the page or the last
server response
:rtype: Tuple[List[str], Optional[str]]
:returns: Tuple containing a list of up to 100 video watch ids and
a continuation token, if more videos are available
"""
initial_data = json.loads(raw_json)
try:
# this is the json tree structure, if the json was extracted from
# html
section_contents = initial_data["contents"][
"twoColumnBrowseResultsRenderer"][
"tabs"][0]["tabRenderer"]["content"][
"sectionListRenderer"]["contents"]
try:
# Playlist without submenus
important_content = section_contents[
0]["itemSectionRenderer"][
"contents"][0]["playlistVideoListRenderer"]
except (KeyError, IndexError, TypeError):
# Playlist with submenus
important_content = section_contents[
1]["itemSectionRenderer"][
"contents"][0]["playlistVideoListRenderer"]
videos = important_content["contents"]
except (KeyError, IndexError, TypeError):
try:
# this is the json tree structure, if the json was directly sent
# by the server in a continuation response
# no longer a list and no longer has the "response" key
important_content = initial_data['onResponseReceivedActions'][0][
'appendContinuationItemsAction']['continuationItems']
videos = important_content
except (KeyError, IndexError, TypeError) as p:
logger.info(p)
return [], None
try:
continuation = videos[-1]['continuationItemRenderer'][
'continuationEndpoint'
]['continuationCommand']['token']
videos = videos[:-1]
except (KeyError, IndexError):
# if there is an error, no continuation is available
continuation = None
# remove duplicates
return (
uniqueify(
list(
# only extract the video ids from the video data
map(
lambda x: (
f"/watch?v="
f"{x['playlistVideoRenderer']['videoId']}"
),
videos
)
),
),
continuation,
)
def trimmed(self, video_id: str) -> Iterable[str]:
"""Retrieve a list of YouTube video URLs trimmed at the given video ID
i.e. if the playlist has video IDs 1,2,3,4 calling trimmed(3) returns
[1,2]
:type video_id: str
video ID to trim the returned list of playlist URLs at
:rtype: List[str]
:returns:
List of video URLs from the playlist trimmed at the given ID
"""
for page in self._paginate(until_watch_id=video_id):
yield from (self._video_url(watch_path) for watch_path in page)
def url_generator(self):
"""Generator that yields video URLs.
:Yields: Video URLs
"""
for page in self._paginate():
for video in page:
yield self._video_url(video)
@property # type: ignore
@cache
def video_urls(self) -> DeferredGeneratorList:
"""Complete links of all the videos in playlist
:rtype: List[str]
:returns: List of video URLs
"""
return DeferredGeneratorList(self.url_generator())
def videos_generator(self):
for url in self.video_urls:
yield YouTube(url)
@property
def videos(self) -> Iterable[YouTube]:
"""Yields YouTube objects of videos in this playlist
:rtype: List[YouTube]
:returns: List of YouTube
"""
return DeferredGeneratorList(self.videos_generator())
def __getitem__(self, i: Union[slice, int]) -> Union[str, List[str]]:
return self.video_urls[i]
def __len__(self) -> int:
return len(self.video_urls)
def __repr__(self) -> str:
return f"{repr(self.video_urls)}"
@property
@cache
def last_updated(self) -> Optional[date]:
"""Extract the date that the playlist was last updated.
For some playlists, this will be a specific date, which is returned as a datetime
object. For other playlists, this is an estimate such as "1 week ago". Due to the
fact that this value is returned as a string, pytube does a best-effort parsing
where possible, and returns the raw string where it is not possible.
:return: Date of last playlist update where possible, else the string provided
:rtype: datetime.date
"""
last_updated_text = self.sidebar_info[0]['playlistSidebarPrimaryInfoRenderer'][
'stats'][2]['runs'][1]['text']
try:
date_components = last_updated_text.split()
month = date_components[0]
day = date_components[1].strip(',')
year = date_components[2]
return datetime.strptime(
f"{month} {day:0>2} {year}", "%b %d %Y"
).date()
except (IndexError, KeyError):
return last_updated_text
@property
@cache
def title(self) -> Optional[str]:
"""Extract playlist title
:return: playlist title (name)
:rtype: Optional[str]
"""
return self.sidebar_info[0]['playlistSidebarPrimaryInfoRenderer'][
'title']['runs'][0]['text']
@property
def description(self) -> str:
return self.sidebar_info[0]['playlistSidebarPrimaryInfoRenderer'][
'description']['simpleText']
@property
def length(self):
"""Extract the number of videos in the playlist.
:return: Playlist video count
:rtype: int
"""
count_text = self.sidebar_info[0]['playlistSidebarPrimaryInfoRenderer'][
'stats'][0]['runs'][0]['text']
count_text = count_text.replace(',','')
return int(count_text)
@property
def views(self):
"""Extract view count for playlist.
:return: Playlist view count
:rtype: int
"""
# "1,234,567 views"
views_text = self.sidebar_info[0]['playlistSidebarPrimaryInfoRenderer'][
'stats'][1]['simpleText']
# "1,234,567"
count_text = views_text.split()[0]
# "1234567"
count_text = count_text.replace(',', '')
return int(count_text)
@property
def owner(self):
"""Extract the owner of the playlist.
:return: Playlist owner name.
:rtype: str
"""
return self.sidebar_info[1]['playlistSidebarSecondaryInfoRenderer'][
'videoOwner']['videoOwnerRenderer']['title']['runs'][0]['text']
@property
def owner_id(self):
"""Extract the channel_id of the owner of the playlist.
:return: Playlist owner's channel ID.
:rtype: str
"""
return self.sidebar_info[1]['playlistSidebarSecondaryInfoRenderer'][
'videoOwner']['videoOwnerRenderer']['title']['runs'][0][
'navigationEndpoint']['browseEndpoint']['browseId']
@property
def owner_url(self):
"""Create the channel url of the owner of the playlist.
:return: Playlist owner's channel url.
:rtype: str
"""
return f'https://www.youtube.com/channel/{self.owner_id}'
@staticmethod
def _video_url(watch_path: str):
return f"https://www.youtube.com{watch_path}"
|
unlicense
|
a750825a6ad36ac1a7782e70397c068b
| 32.899761
| 89
| 0.558857
| 4.542373
| false
| false
| false
| false
|
mozilla-iam/cis
|
python-modules/cis_crypto/cis_crypto/cli.py
|
1
|
3035
|
#!/usr/bin/env python3
import argparse
import jose
import logging
import sys
from cis_crypto import common
from cis_crypto import operation
class cli:
def __init__(self):
self.config = None
self.prog = sys.argv[0].split("/")[-1]
def parse_args(self, args):
parser = argparse.ArgumentParser(
description="""
        Command line wrapper for mozilla-iam sign/verify operations on JSON and YAML files using JWKS.
"""
)
subparsers = parser.add_subparsers(dest="cryptographic-operation")
subparsers.required = True
sign_operation_parser = subparsers.add_parser(
"sign", help="Use a jwks key to generate a signature for a file. (Assumes a json or yaml file)"
)
sign_operation_parser.add_argument(
"--file", help="The path to the file you would like to sign. (Assumes a json or yaml file)"
)
sign_operation_parser.set_defaults(func="sign_operation")
verify_operation_parser = subparsers.add_parser(
"verify", help="Verify a signture with a known file. (Assumes a json file)"
)
verify_operation_parser.add_argument("--file", help="The path to the file you would like to sign.")
verify_operation_parser.set_defaults(func="verify_operation")
return parser.parse_args(args)
def run(self):
logger = logging.getLogger(__name__)
self.config = self.parse_args(sys.argv[1:])
if self.config.func == "sign_operation":
logger.info("Attempting to sign file: {}".format(self.config.file))
file_content = common.load_file(self.config.file)
signing_object = operation.Sign()
signing_object.load(file_content)
jws = signing_object.jws()
common.write_file(jws, "{}.jws".format(self.config.file))
logger.info("File signed. Your signed file is now: {}.jws".format(self.config.file))
logger.info("To verify this file use cis_crypto verify --file {}.jws".format(self.config.file))
elif self.config.func == "verify_operation":
logger.info("Attempting verification of signature for file: {}".format(self.config.file))
everett_config = common.get_config()
logger.info(
"Attempting fetch of .well-known data from: {}".format(
everett_config("public_key_name", namespace="cis", default="access-file-key.pub.pem")
)
)
file_content = common.load_file(self.config.file)
verify_object = operation.Verify()
verify_object.load(file_content)
try:
jws = verify_object.jws() # This will raise if the signature is invalid.
logger.info("Signature verified for file: {}".format(self.config.file))
except jose.exceptions.JWSError:
logger.error("The signature could not be verified.")
sys.exit()
sys.exit()
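# Illustrative invocations (not executed); file names are hypothetical:
#
#   cis_crypto sign --file profile.yaml        # writes profile.yaml.jws
#   cis_crypto verify --file profile.yaml.jws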
|
mpl-2.0
|
6e4ea185859404dd312ca4ca6712d897
| 40.013514
| 107
| 0.604942
| 4.062918
| false
| true
| false
| false
|
mozilla-iam/cis
|
python-modules/cis_notifications/cis_notifications/event.py
|
1
|
5547
|
import logging
import time
import requests
from cis_notifications import common
from cis_notifications import secret
logger = logging.getLogger(__name__)
def expired(ts, leeway=0):
return ts < time.time() + leeway
class Event(object):
"""Handle events from lambda and generate hooks out to publishers."""
def __init__(self, event):
"""[summary]
Arguments:
object {[type]} -- [an instance of the event class.]
event {[type]} -- [the event as ingested from the kinesis stream.]
subscriptions {[type]} -- [list of urls to post notifications to.]
"""
self.config = common.get_config()
self.event = event
self.secret_manager = secret.Manager()
self.access_token = None
def to_notification(self):
"""[summary]
Transform the instance of the event from the stream into a notification payload.
[return] JSON data structure to send using requests.
"""
logger.debug("An event was received", extra={"event": self.event})
updated_record = self.event.get("dynamodb")
operation = "foxy" # Just a place holder in case we have an unhandled event.
if self.event.get("eventName") == "INSERT":
operation = "create"
if self.event.get("eventName") == "MODIFY":
operation = "update"
if self.event.get("eventName") == "REMOVE":
operation = "delete"
if updated_record is not None:
            # Provided the event record has the expected DynamoDB stream structure, build the notification.
notification = {
"operation": operation,
"id": updated_record["Keys"]["id"]["S"],
"time": updated_record["ApproximateCreationDateTime"],
}
logger.debug("Notification generated.", extra={"notification": notification})
return notification
else:
logger.debug("No notification generated.")
return {}
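    # Illustrative sketch (not executed): a stream record such as the following
    # (identifiers are hypothetical)
    #
    #   {"eventName": "MODIFY",
    #    "dynamodb": {"Keys": {"id": {"S": "ad|Mozilla-LDAP|jdoe"}},
    #                 "ApproximateCreationDateTime": 1571919358.0}}
    #
    # is transformed into
    #
    #   {"operation": "update", "id": "ad|Mozilla-LDAP|jdoe", "time": 1571919358.0}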
def send(self, notification):
"""[summary]
Get the list of notification endpoints from the object constructor and send a POST with the json payload.
Arguments:
object {[type]} -- [an instance of the event class.]
object {[notification]} -- [A json payload that you would like to send to the RP.]
[return] Dictionary of status codes by publisher.
"""
# Not in-memory access token?
if not self.access_token:
# Load whatever is in our secrets
self.access_token_dict = self.secret_manager.secretmgr("az_access_token")
# Check if what we had in secrets is still valid!
# This includes 10s leeway for clock sync issues and 15min (900s) for max-lambda function time.
        # Since tokens are normally valid for 86400s (1 day) that should accommodate all cases. If tokens were valid
        # for less than 15min for any reason, this would simply bypass the cache.
if expired(float(self.access_token_dict.get("exp", 0.0)), leeway=910):
logger.info("Access token has expired, refreshing")
authzero = self._get_authzero_client()
self.access_token_dict = authzero.exchange_for_access_token()
# Auth0 gives us the difference (expires_in) not a time stamp, so we need to calculate when the token
# expires.
self.access_token_dict["exp"] = time.time() + float(self.access_token_dict.get("expires_in", 60.0))
self.secret_manager.secretmgr_store("az_access_token", self.access_token_dict)
else:
logger.info("Re-using cached access token")
self.access_token = self.access_token_dict["access_token"]
if notification != {}:
rp_urls = self.config(
"rp_urls", namespace="cis", default="https://dinopark.k8s.dev.sso.allizom.org/events/update"
)
results = {}
for url in rp_urls.split(","):
result = self._notify_via_post(url, notification, self.access_token)
results[url] = result
return results
def _get_authzero_client(self):
authzero = secret.AuthZero(
client_id=self.secret_manager.secret("client_id"),
client_secret=self.secret_manager.secret("client_secret"),
api_identifier=self.config("api_identifier", namespace="cis", default="hook.dev.sso.allizom.org"),
authzero_tenant=self.config("authzero_tenant", namespace="cis", default="auth.mozilla.auth0.com"),
)
return authzero
def _notify_via_post(self, url, json_payload, access_token):
"""[summary]
Notify a single publisher of the user_id that was updated and return only the status code.
Arguments:
url {[type]} -- [the url of the publisher you woud like to notify.]
json_payload {[type]} -- [the event to send to the publisher.]
"""
try:
response = requests.post(
url, json=json_payload, headers={"authorization": "Bearer {}".format(access_token)}
)
return response.status_code
        except requests.exceptions.Timeout:
            return "Timeout"
        except requests.exceptions.ConnectionError:
            return "ConnectionError"
        except requests.exceptions.HTTPError:
            return "HTTPError"
        except requests.exceptions.RequestException:
            # Base-class catch-all must come last; the specific handlers above would otherwise be unreachable.
            return "Unknown"
|
mpl-2.0
|
0d336f640d1dc4f3cb7fa7d1034befed
| 38.340426
| 119
| 0.597801
| 4.381517
| false
| false
| false
| false
|
mozilla-iam/cis
|
python-modules/cis_profile_retrieval_service/cis_profile_retrieval_service/schema.py
|
1
|
1800
|
import json
import graphene
import cis_profile.graphene
from cis_identity_vault.models import user
from cis_profile_retrieval_service.common import get_table_resource
def is_json(payload):
"""Check if a payload is valid JSON."""
try:
json.loads(payload)
except (TypeError, ValueError):
return False
else:
return True
class Query(graphene.ObjectType):
"""GraphQL Query class for the V2 Profiles."""
profiles = graphene.List(cis_profile.graphene.Profile, primaryEmail=graphene.String(required=False))
profile = graphene.Field(cis_profile.graphene.Profile, userId=graphene.String(required=True))
def resolve_profiles(self, info, **kwargs):
"""GraphQL resolver for the profiles attribute."""
table = get_table_resource()
vault = user.Profile(table)
profiles = []
if kwargs.get("primaryEmail"):
search = vault.find_by_email(kwargs.get("primaryEmail"))
if len(search.get("Items")) > 0:
for profile in search.get("Items"):
                    profiles.append(json.loads(profile.get("profile")))
            else:
                for vault_profile in vault.all:
                    profiles.append(json.loads(vault_profile.get("profile")))
            return profiles
def resolve_profile(self, info, **kwargs):
"""GraphQL resolver for a single profile."""
table = get_table_resource()
vault = user.Profile(table)
if kwargs.get("userId"):
search = vault.find_by_id(kwargs.get("userId"))
if len(search.get("Items")) > 0:
resp = search["Items"][0]["profile"]
else:
resp = json.dumps({})
return resp
class AuthorizationMiddleware:
def resolve(self, next, root, info, **kwargs):
return next(root, info, **kwargs)
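# Illustrative wiring sketch (not executed); the field selection and userId
# value are hypothetical:
#
#   schema = graphene.Schema(query=Query)
#   result = schema.execute(
#       '{ profile(userId: "ad|Mozilla-LDAP|jdoe") { primaryEmail } }',
#       middleware=[AuthorizationMiddleware()],
#   )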
|
mpl-2.0
|
2d73a38b2f7944d0edc6af411dde9e66
| 32.333333
| 104
| 0.618889
| 4.026846
| false
| false
| false
| false
|
mozilla-iam/cis
|
python-modules/cis_crypto/setup.py
|
1
|
1388
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
requirements = [
"python-jose[cryptography]",
"cryptography",
"everett",
"everett[ini]",
"configobj",
"boto3",
"boto",
"botocore",
"requests",
"pyaml",
]
setup_requirements = ["pytest-runner", "setuptools>=40.5.0"]
test_requirements = ["pytest", "pytest-watch", "pytest-cov", "pytest-mock", "moto", "mock", "flake8", "cis_profile"]
extras = {"test": test_requirements}
setup(
name="cis_crypto",
version="0.0.1",
author="Andrew Krug",
author_email="[email protected]",
description="Per attribute signature system for jwks sign-verify in mozilla-iam.",
long_description=long_description,
url="https://github.com/mozilla-iam/cis",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Mozilla Public License",
"Operating System :: OS Independent",
],
install_requires=requirements,
license="Mozilla Public License 2.0",
include_package_data=True,
packages=find_packages(include=["cis_crypto", "bin"]),
scripts=["bin/cis_crypto"],
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
extras_require=extras,
zip_safe=False,
)
|
mpl-2.0
|
1c316fc1356dd8d975ed5651133bf6c5
| 26.215686
| 116
| 0.64121
| 3.435644
| false
| true
| false
| false
|
ibm-watson-iot/iot-python
|
test/test_api_registry_devicetypes.py
|
2
|
6161
|
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import uuid
import pytest
import testUtils
from wiotp.sdk.api.registry.devices import DeviceInfo
from wiotp.sdk.exceptions import ApiException
class TestRegistryDevicetypes(testUtils.AbstractTest):
# =========================================================================
# Device Type tests
# =========================================================================
def testDeviceTypeExistsCheck(self, deviceType):
if deviceType.id in self.appClient.registry.devicetypes:
pass
else:
raise Exception()
if "doesntexist" not in self.appClient.registry.devicetypes:
pass
else:
raise Exception()
def testGetDeviceType(self, deviceType):
retrievedDeviceType = self.appClient.registry.devicetypes[deviceType.id]
assert retrievedDeviceType.id == deviceType.id
assert retrievedDeviceType.classId == "Device"
def testGetDeviceTypeThatDoesntExist(self):
with pytest.raises(Exception):
self.appClient.registry.devicetypes["doesntexist"]
def testUnsupportedCreateUpdate(self):
with pytest.raises(Exception):
self.appClient.registry.devicetypes["d:hldtxx:vm:iot-test-06"] = {"foo", "bar"}
def testListDeviceTypes(self, deviceType):
count = 0
for type in self.appClient.registry.devicetypes:
count += 1
if count > 10:
break
# DeviceTypeDescription test
def testCreateDeviceType(self):
typeId = str(uuid.uuid4())
myDeviceType = self.appClient.registry.devicetypes.create({"id": typeId, "description": "This is a test"})
myDeviceTypeRetrieved = self.appClient.registry.devicetypes[typeId]
assert myDeviceTypeRetrieved.id == typeId
assert myDeviceTypeRetrieved.description == "This is a test"
del self.appClient.registry.devicetypes[typeId]
def testCreateDeviceTypeNone(self):
typeId = str(uuid.uuid4())
myDeviceType = self.appClient.registry.devicetypes.create({"id": typeId, "description": None})
myDeviceTypeRetrieved = self.appClient.registry.devicetypes[typeId]
assert myDeviceTypeRetrieved.id == typeId
assert myDeviceTypeRetrieved.description == None
del self.appClient.registry.devicetypes[typeId]
# Metadata test
def testCreateDeviceMetadata(self):
typeId = str(uuid.uuid4())
myDeviceType = self.appClient.registry.devicetypes.create(
{"id": typeId, "description": "This is still a test", "metadata": {"test": "test"}}
)
myDeviceTypeRetrieved = self.appClient.registry.devicetypes[typeId]
assert myDeviceTypeRetrieved.id == typeId
assert myDeviceTypeRetrieved.description == "This is still a test"
assert myDeviceTypeRetrieved.metadata == {"test": "test"}
del self.appClient.registry.devicetypes[typeId]
def testCreateDeviceMetadataNone(self):
typeId = str(uuid.uuid4())
myDeviceType = self.appClient.registry.devicetypes.create(
{"id": typeId, "description": "This is still a test", "metadata": None}
)
myDeviceTypeRetrieved = self.appClient.registry.devicetypes[typeId]
assert myDeviceTypeRetrieved.id == typeId
assert myDeviceTypeRetrieved.description == "This is still a test"
assert myDeviceTypeRetrieved.metadata == None
del self.appClient.registry.devicetypes[typeId]
def testUpdateDeviceType(self, deviceType):
self.appClient.registry.devicetypes.update(deviceType.id, description="This is still a test")
updatedDeviceType = self.appClient.registry.devicetypes[deviceType.id]
assert updatedDeviceType.description == "This is still a test"
def testUpdateDeviceInfo(self, deviceType):
self.appClient.registry.devicetypes.update(deviceType.id, deviceInfo=DeviceInfo(serialNumber="111"))
updatedDeviceType = self.appClient.registry.devicetypes[deviceType.id]
assert updatedDeviceType.deviceInfo.serialNumber == "111"
# =========================================================================
# Device under DeviceType tests
# =========================================================================
def testDeviceExistsCheck(self, deviceType, device):
if device.deviceId in deviceType.devices:
pass
else:
raise Exception()
if "wheredidyago" not in deviceType.devices:
pass
else:
raise Exception()
def testGetDeviceFromDeviceType(self, deviceType, device):
myDevice = self.appClient.registry.devicetypes[deviceType.id].devices[device.deviceId]
def testListDevicesFromDeviceType(self, deviceType, device):
# Get a device, and cache the response in a local object
count = 0
for device in deviceType.devices:
count += 1
if count > 10:
break
    def testCreateDeviceTypeInvalid(self):
with pytest.raises(ApiException):
typeId = 1
r = self.appClient.registry.devicetypes.create(typeId)
    def testUpdateDeviceTypeInvalid(self):
with pytest.raises(ApiException):
data = None
r = self.appClient.registry.devicetypes.update(data)
def testDeleteTypeId(self, device, deviceType):
typeId = str(uuid.uuid4())
self.appClient.registry.devicetypes.create(
{"id": typeId, "description": "This is still a test", "metadata": {"test": "test"}}
)
self.appClient.registry.devicetypes.delete(typeId)
assert typeId not in deviceType.devices
|
epl-1.0
|
03ebfc866c44dd5b025dafca891c805b
| 38
| 114
| 0.627333
| 4.388177
| false
| true
| false
| false
|
ibm-watson-iot/iot-python
|
samples/deviceFactory/deviceStatus.py
|
2
|
3130
|
#!/usr/bin/env python
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import argparse
import sys
import os
import yaml
import wiotp.sdk.application
def loadConfigFile(source):
data = {}
with open(source, "r") as sourceFile:
data = yaml.full_load(sourceFile)
return data
if __name__ == "__main__":
# Initialize the properties we need
parser = argparse.ArgumentParser(
description="IBM Watson IoT Platform Device Status. For more information see https://github.com/ibm-watson-iot/iot-python/samples/deviceFactory"
)
parser.add_argument(
"-t",
"--typeId",
required=False,
default="iotpsutil",
help="Set the typeId for the device batch. Defaults to iotpsutil",
)
parser.add_argument(
"-b",
"--batchId",
required=True,
help="DeviceIDs will be prefixed by the batch number, e.g. batchID-0001, batchID-0002",
)
parser.add_argument(
"-n",
"--numberOfDevices",
required=True,
type=int,
help="How many device configuration files should be produced by the factory. Max value is 1000",
)
args, unknown = parser.parse_known_args()
options = wiotp.sdk.application.parseEnvVars()
client = wiotp.sdk.application.ApplicationClient(options)
# Terminal colour mods
red = "%c[31m" % chr(27)
green = "%c[32m" % chr(27)
off = "%c[0m" % chr(27)
statuses = client.registry.connectionStatus.find(typeId=args.typeId)
output = {}
for status in statuses:
# print(status)
clientId = status["id"]
deviceId = clientId.split(":")[3]
if not deviceId.startswith(args.batchId):
continue
(batchId, batchNum) = clientId.split("-")
if status["connectionStatus"] == "disconnected":
output[batchNum] = "%s%s%s" % (red, batchNum, off)
elif status["connectionStatus"] == "connected":
output[batchNum] = "%s%s%s" % (green, batchNum, off)
else:
output[batchNum] = "%s" % (batchNum)
print("=================================================")
print("Device Connection State Report")
print("")
print("%s:%s-x" % (args.typeId, args.batchId))
print("")
print("%sconnected%s / %sdisconnected%s / unknown" % (green, off, red, off))
print("=================================================")
outStr = ""
for i in range(1, args.numberOfDevices + 1):
batchNum = "%04d" % (i)
if batchNum in output:
outStr += output[batchNum] + " "
else:
outStr += batchNum + " "
if batchNum[3] == "0":
outStr += "\n"
print(outStr)
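# Illustrative invocation (not executed); Watson IoT application credentials
# are read from the environment via wiotp.sdk.application.parseEnvVars():
#
#   python deviceStatus.py --typeId iotpsutil --batchId batch1 --numberOfDevices 100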
|
epl-1.0
|
0436ee43d53172ed8b41cd5cf2c4b40d
| 30.938776
| 153
| 0.555911
| 3.927227
| false
| false
| false
| false
|
ibm-watson-iot/iot-python
|
src/wiotp/sdk/device/managedClient.py
|
2
|
27070
|
# *****************************************************************************
# Copyright (c) 2014, 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from datetime import datetime
import json
import logging
import threading
import pytz
import uuid
from wiotp.sdk import ConnectionException, ConfigurationException
from wiotp.sdk.device.client import DeviceClient
from wiotp.sdk.device.deviceInfo import DeviceInfo
from wiotp.sdk.device.deviceFirmware import DeviceFirmware
class ManagedDeviceClient(DeviceClient):
# Publish MQTT topics
MANAGE_TOPIC = "iotdevice-1/mgmt/manage"
UNMANAGE_TOPIC = "iotdevice-1/mgmt/unmanage"
UPDATE_LOCATION_TOPIC = "iotdevice-1/device/update/location"
ADD_ERROR_CODE_TOPIC = "iotdevice-1/add/diag/errorCodes"
CLEAR_ERROR_CODES_TOPIC = "iotdevice-1/clear/diag/errorCodes"
NOTIFY_TOPIC = "iotdevice-1/notify"
RESPONSE_TOPIC = "iotdevice-1/response"
ADD_LOG_TOPIC = "iotdevice-1/add/diag/log"
CLEAR_LOG_TOPIC = "iotdevice-1/clear/diag/log"
# Subscribe MQTT topics
DM_RESPONSE_TOPIC = "iotdm-1/response"
DM_OBSERVE_TOPIC = "iotdm-1/observe"
DM_REBOOT_TOPIC = "iotdm-1/mgmt/initiate/device/reboot"
DM_FACTORY_REESET = "iotdm-1/mgmt/initiate/device/factory_reset"
DM_UPDATE_TOPIC = "iotdm-1/device/update"
DM_CANCEL_OBSERVE_TOPIC = "iotdm-1/cancel"
DM_FIRMWARE_DOWNLOAD_TOPIC = "iotdm-1/mgmt/initiate/firmware/download"
DM_FIRMWARE_UPDATE_TOPIC = "iotdm-1/mgmt/initiate/firmware/update"
DME_ACTION_TOPIC = "iotdm-1/mgmt/custom/#"
    # Response codes
RESPONSECODE_FUNCTION_NOT_SUPPORTED = 501
RESPONSECODE_ACCEPTED = 202
RESPONSECODE_INTERNAL_ERROR = 500
RESPONSECODE_BAD_REQUEST = 400
UPDATESTATE_IDLE = 0
UPDATESTATE_DOWNLOADING = 1
UPDATESTATE_DOWNLOADED = 2
UPDATESTATE_SUCCESS = 0
UPDATESTATE_IN_PROGRESS = 1
UPDATESTATE_OUT_OF_MEMORY = 2
UPDATESTATE_CONNECTION_LOST = 3
UPDATESTATE_VERIFICATION_FAILED = 4
UPDATESTATE_UNSUPPORTED_IMAGE = 5
UPDATESTATE_INVALID_URI = 6
def __init__(self, config, logHandlers=None, deviceInfo=None):
if config["identity"]["orgId"] == "quickstart":
raise ConfigurationException("QuickStart does not support device management")
DeviceClient.__init__(self, config, logHandlers)
# Initialize user supplied callback
self.deviceActionCallback = None
self.firmwereActionCallback = None
self.dmeActionCallback = None
messages_callbacks = (
("iotdm-1/#", self.__onDeviceMgmtResponse),
(ManagedDeviceClient.DM_REBOOT_TOPIC, self.__onRebootRequest),
(ManagedDeviceClient.DM_FACTORY_REESET, self.__onFactoryResetRequest),
(ManagedDeviceClient.DM_FIRMWARE_UPDATE_TOPIC, self.__onFirmwereUpdate),
(ManagedDeviceClient.DM_OBSERVE_TOPIC, self.__onFirmwereObserve),
(ManagedDeviceClient.DM_FIRMWARE_DOWNLOAD_TOPIC, self.__onFirmwereDownload),
(ManagedDeviceClient.DM_UPDATE_TOPIC, self.__onUpdatedDevice),
(ManagedDeviceClient.DM_CANCEL_OBSERVE_TOPIC, self.__onFirmwereCancel),
(ManagedDeviceClient.DME_ACTION_TOPIC, self.__onDMEActionRequest),
)
# Add handler for supported device management commands
for message, callback in messages_callbacks:
self.client.message_callback_add(message, callback)
# Initialize user supplied callback
self.client.on_subscribe = self._onSubscribe
self.client.on_disconnect = self._onDisconnect
self.readyForDeviceMgmt = threading.Event()
# List of DM requests that have not received a response yet
self._deviceMgmtRequestsPendingLock = threading.Lock()
self._deviceMgmtRequestsPending = {}
# List of DM notify hook
self._deviceMgmtObservationsLock = threading.Lock()
self._deviceMgmtObservations = []
# Initialize local device data model
self.metadata = {}
if deviceInfo is not None:
self._deviceInfo = deviceInfo
else:
self._deviceInfo = DeviceInfo()
self._location = None
self._errorCode = None
self.__firmwareUpdate = None
self.manageTimer = None
# Register startup subscription list
self._subscriptions[self.DM_RESPONSE_TOPIC] = 1
self._subscriptions[self.DM_OBSERVE_TOPIC] = 1
self._subscriptions[self.DM_REBOOT_TOPIC] = 1
self._subscriptions[self.DM_FACTORY_REESET] = 1
self._subscriptions[self.DM_UPDATE_TOPIC] = 1
self._subscriptions[self.DM_FIRMWARE_UPDATE_TOPIC] = 1
self._subscriptions[self.DM_FIRMWARE_DOWNLOAD_TOPIC] = 1
self._subscriptions[self.DM_CANCEL_OBSERVE_TOPIC] = 1
self._subscriptions[self._COMMAND_TOPIC] = 1
self._subscriptions[self.DME_ACTION_TOPIC] = 1
def setProperty(self, name, value):
if name not in [
"serialNumber",
"manufacturer",
"model",
"deviceClass",
"description",
"fwVersion",
"hwVersion",
"descriptiveLocation",
]:
raise Exception("Unsupported property name: %s" % name)
self._deviceInfo[name] = value
return self.notifyFieldChange("deviceInfo.%s" % name, value)
def notifyFieldChange(self, field, value):
with self._deviceMgmtObservationsLock:
if field in self._deviceMgmtObservations:
if not self.readyForDeviceMgmt.wait(timeout=10):
self.logger.warning(
"Unable to notify service of field "
"change because device is not ready "
"for device management"
)
return threading.Event().set()
reqId = str(uuid.uuid4())
message = {"d": {"field": field, "value": value}, "reqId": reqId}
resolvedEvent = threading.Event()
self.client.publish(ManagedDeviceClient.NOTIFY_TOPIC, payload=json.dumps(message), qos=1, retain=False)
with self._deviceMgmtRequestsPendingLock:
self._deviceMgmtRequestsPending[reqId] = {
"topic": ManagedDeviceClient.NOTIFY_TOPIC,
"message": message,
"event": resolvedEvent,
}
return resolvedEvent
else:
return threading.Event().set()
def _onSubscribe(self, mqttc, userdata, mid, granted_qos):
super(ManagedDeviceClient, self)._onSubscribe(mqttc, userdata, mid, granted_qos)
# Once IoTF acknowledges the subscriptions we are able to process commands and responses from device management server
self.manage()
def manage(
self,
lifetime=3600,
supportDeviceActions=True,
supportFirmwareActions=True,
supportDeviceMgmtExtActions=False,
bundleIds=[],
):
        # TODO: throw an error; the minimum lifetime this client supports is 1 hour, but for now set lifetime to infinite if it's invalid
if lifetime < 3600:
lifetime = 0
if not self.subscriptionsAcknowledged.wait(timeout=10):
self.logger.warning(
"Unable to send register for device " "management because device subscriptions " "are not in place"
)
return threading.Event().set()
reqId = str(uuid.uuid4())
message = {
"d": {
"lifetime": lifetime,
"supports": {"deviceActions": supportDeviceActions, "firmwareActions": supportFirmwareActions},
"deviceInfo": self._deviceInfo.__dict__,
"metadata": self.metadata,
},
"reqId": reqId,
}
if supportDeviceMgmtExtActions and len(bundleIds) > 0:
for bundleId in bundleIds:
message["d"]["supports"][bundleId] = supportDeviceMgmtExtActions
resolvedEvent = threading.Event()
self.client.publish(ManagedDeviceClient.MANAGE_TOPIC, payload=json.dumps(message), qos=1, retain=False)
with self._deviceMgmtRequestsPendingLock:
self._deviceMgmtRequestsPending[reqId] = {
"topic": ManagedDeviceClient.MANAGE_TOPIC,
"message": message,
"event": resolvedEvent,
}
# Register the future call back to Watson IoT Platform 2 minutes before the device lifetime expiry
if lifetime != 0:
if self.manageTimer is not None:
self.logger.debug("Cancelling existing manage timer")
self.manageTimer.cancel()
self.manageTimer = threading.Timer(
lifetime - 120,
self.manage,
[lifetime, supportDeviceActions, supportFirmwareActions, supportDeviceMgmtExtActions, bundleIds],
)
self.manageTimer.start()
return resolvedEvent
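    # Illustrative lifecycle sketch (not executed). manage() is also invoked
    # automatically once subscriptions are acknowledged (see _onSubscribe) and
    # re-sent 2 minutes before `lifetime` expires:
    #
    #   client = ManagedDeviceClient(config, deviceInfo=DeviceInfo())
    #   client.connect()
    #   ...                 # publish events, report errors, update location
    #   client.unmanage()
    #   client.disconnect()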
def unmanage(self):
if not self.readyForDeviceMgmt.wait(timeout=10):
self.logger.warning(
"Unable to set device to unmanaged because " "device is not ready for device management"
)
return threading.Event().set()
reqId = str(uuid.uuid4())
message = {"reqId": reqId}
resolvedEvent = threading.Event()
self.client.publish(ManagedDeviceClient.UNMANAGE_TOPIC, payload=json.dumps(message), qos=1, retain=False)
with self._deviceMgmtRequestsPendingLock:
self._deviceMgmtRequestsPending[reqId] = {
"topic": ManagedDeviceClient.UNMANAGE_TOPIC,
"message": message,
"event": resolvedEvent,
}
return resolvedEvent
def setLocation(self, longitude, latitude, elevation=None, accuracy=None):
# TODO: Add validation (e.g. ensure numeric values)
if self._location is None:
self._location = {}
self._location["longitude"] = longitude
self._location["latitude"] = latitude
if elevation:
self._location["elevation"] = elevation
self._location["measuredDateTime"] = datetime.now(pytz.timezone("UTC")).isoformat()
if accuracy:
self._location["accuracy"] = accuracy
elif "accuracy" in self._location:
del self._location["accuracy"]
if not self.readyForDeviceMgmt.wait(timeout=10):
self.logger.warning(
"Unable to publish device location because " "device is not ready for device management"
)
return threading.Event().set()
reqId = str(uuid.uuid4())
message = {"d": self._location, "reqId": reqId}
resolvedEvent = threading.Event()
self.client.publish(ManagedDeviceClient.UPDATE_LOCATION_TOPIC, payload=json.dumps(message), qos=1, retain=False)
with self._deviceMgmtRequestsPendingLock:
self._deviceMgmtRequestsPending[reqId] = {
"topic": ManagedDeviceClient.UPDATE_LOCATION_TOPIC,
"message": message,
"event": resolvedEvent,
}
return resolvedEvent
def setErrorCode(self, errorCode=0):
if errorCode is None:
errorCode = 0
self._errorCode = errorCode
if not self.readyForDeviceMgmt.wait(timeout=10):
self.logger.warning("Unable to publish error code because " "device is not ready for device management")
return threading.Event().set()
reqId = str(uuid.uuid4())
message = {"d": {"errorCode": errorCode}, "reqId": reqId}
resolvedEvent = threading.Event()
self.client.publish(ManagedDeviceClient.ADD_ERROR_CODE_TOPIC, payload=json.dumps(message), qos=1, retain=False)
with self._deviceMgmtRequestsPendingLock:
self._deviceMgmtRequestsPending[reqId] = {
"topic": ManagedDeviceClient.ADD_ERROR_CODE_TOPIC,
"message": message,
"event": resolvedEvent,
}
return resolvedEvent
def clearErrorCodes(self):
self._errorCode = None
if not self.readyForDeviceMgmt.wait(timeout=10):
self.logger.warning("Unable to clear error codes because " "device is not ready for device management")
return threading.Event().set()
reqId = str(uuid.uuid4())
message = {"reqId": reqId}
resolvedEvent = threading.Event()
self.client.publish(
ManagedDeviceClient.CLEAR_ERROR_CODES_TOPIC, payload=json.dumps(message), qos=1, retain=False
)
with self._deviceMgmtRequestsPendingLock:
self._deviceMgmtRequestsPending[reqId] = {
"topic": ManagedDeviceClient.CLEAR_ERROR_CODES_TOPIC,
"message": message,
"event": resolvedEvent,
}
return resolvedEvent
def addLog(self, msg="", data="", sensitivity=0):
timestamp = datetime.now().isoformat()
if not self.readyForDeviceMgmt.wait(timeout=10):
self.logger.warning("Unable to publish error code because " "device is not ready for device management")
return threading.Event().set()
reqId = str(uuid.uuid4())
message = {"d": {"message": msg, "timestamp": timestamp, "data": data, "severity": sensitivity}, "reqId": reqId}
resolvedEvent = threading.Event()
self.client.publish(ManagedDeviceClient.ADD_LOG_TOPIC, payload=json.dumps(message), qos=1, retain=False)
with self._deviceMgmtRequestsPendingLock:
self._deviceMgmtRequestsPending[reqId] = {
"topic": ManagedDeviceClient.ADD_LOG_TOPIC,
"message": message,
"event": resolvedEvent,
}
return resolvedEvent
def clearLog(self):
if not self.readyForDeviceMgmt.wait(timeout=10):
self.logger.warning("Unable to clear log because device is not ready for device management")
return threading.Event().set()
reqId = str(uuid.uuid4())
message = {"reqId": reqId}
resolvedEvent = threading.Event()
self.client.publish(ManagedDeviceClient.CLEAR_LOG_TOPIC, payload=json.dumps(message), qos=1, retain=False)
with self._deviceMgmtRequestsPendingLock:
self._deviceMgmtRequestsPending[reqId] = {
"topic": ManagedDeviceClient.CLEAR_LOG_TOPIC,
"message": message,
"event": resolvedEvent,
}
return resolvedEvent
def __onDeviceMgmtResponse(self, client, userdata, pahoMessage):
try:
data = json.loads(pahoMessage.payload.decode("utf-8"))
if "rc" not in data:
return True
rc = data["rc"]
reqId = data["reqId"]
except ValueError as e:
raise Exception('Unable to parse JSON. payload="%s" error=%s' % (pahoMessage.payload, str(e)))
else:
request = None
with self._deviceMgmtRequestsPendingLock:
try:
request = self._deviceMgmtRequestsPending.pop(reqId)
except KeyError:
self.logger.warning("Received unexpected response from " "device management: %s", reqId)
else:
self.logger.debug(
"Remaining unprocessed device " "management requests: %s", len(self._deviceMgmtRequestsPending)
)
if request is None:
return False
state = {
ManagedDeviceClient.MANAGE_TOPIC: {
# rc, json.dumps(request['message'])
"msg_succ": "[%s] Manage action completed: %s",
"msg_fail": "[%s] Manage action failed: %s",
},
ManagedDeviceClient.UNMANAGE_TOPIC: {
"msg_succ": "[%s] Unmanage action completed: %s",
"msg_fail": "[%s] Unmanage action failed: %s",
},
ManagedDeviceClient.UPDATE_LOCATION_TOPIC: {
"msg_succ": "[%s] Location update action completed: %s",
"msg_fail": "[%s] Location update action failed: %s",
},
ManagedDeviceClient.ADD_ERROR_CODE_TOPIC: {
"msg_succ": "[%s] Add error code action completed: %s",
"msg_fail": "[%s] Add error code action failed: %s",
},
ManagedDeviceClient.CLEAR_ERROR_CODES_TOPIC: {
"msg_succ": "[%s] Clear error codes action completed: %s",
"msg_fail": "[%s] Clear error codes action failed: %s",
},
ManagedDeviceClient.ADD_LOG_TOPIC: {
"msg_succ": "[%s] Add log action completed: %s",
"msg_fail": "[%s] Add log action failed: %s",
},
ManagedDeviceClient.CLEAR_LOG_TOPIC: {
"msg_succ": "[%s] Clear log action completed: %s",
"msg_fail": "[%s] Clear log action failed: %s",
},
}
try:
msg_succ = state[request["topic"]]["msg_succ"]
msg_fail = state[request["topic"]]["msg_fail"]
except Exception as e:
self.logger.warning("[%s] Unknown action response: %s", rc, json.dumps(request["message"]))
else:
dump_str = json.dumps(request["message"])
if rc == 200:
self.logger.info(msg_succ, rc, dump_str)
else:
self.logger.critical(msg_fail, rc, dump_str)
if request["topic"] == ManagedDeviceClient.MANAGE_TOPIC:
self.readyForDeviceMgmt.set()
elif request["topic"] == ManagedDeviceClient.UNMANAGE_TOPIC:
self.readyForDeviceMgmt.clear()
            # Now set the event, allowing anyone that was waiting on this to proceed
request["event"].set()
return True
# Device Action Handlers
def __onRebootRequest(self, client, userdata, pahoMessage):
paho_payload = pahoMessage.payload.decode("utf-8")
self.logger.info(
"Message received on topic :%s with payload %s", ManagedDeviceClient.DM_REBOOT_TOPIC, paho_payload
)
try:
data = json.loads(paho_payload)
reqId = data["reqId"]
if self.deviceActionCallback:
self.deviceActionCallback(reqId, "reboot")
except ValueError as e:
raise Exception('Unable to process Reboot request. payload="%s" error=%s' % (pahoMessage.payload, str(e)))
def __onFactoryResetRequest(self, client, userdata, pahoMessage):
paho_payload = pahoMessage.payload.decode("utf-8")
self.logger.info(
"Message received on topic :%s with payload %s", ManagedDeviceClient.DM_FACTORY_REESET, paho_payload
)
try:
data = json.loads(paho_payload)
reqId = data["reqId"]
if self.deviceActionCallback:
self.deviceActionCallback(reqId, "reset")
except ValueError as e:
raise Exception(
'Unable to process Factory Reset request. payload="%s" error=%s' % (pahoMessage.payload, str(e))
)
def respondDeviceAction(self, reqId, responseCode=202, message=""):
response = {"rc": responseCode, "message": message, "reqId": reqId}
payload = json.dumps(response)
self.logger.info("Publishing Device Action response with payload :%s", payload)
self.client.publish("iotdevice-1/response", payload, qos=1, retain=False)
# Firmware Handlers
def __onFirmwereDownload(self, client, userdata, pahoMessage):
paho_payload = pahoMessage.payload.decode("utf-8")
self.logger.info(
"Message received on topic :%s with payload %s",
ManagedDeviceClient.DM_FIRMWARE_DOWNLOAD_TOPIC,
paho_payload,
)
data = json.loads(paho_payload)
reqId = data["reqId"]
rc = ManagedDeviceClient.RESPONSECODE_ACCEPTED
msg = ""
if self.__firmwareUpdate.state != ManagedDeviceClient.UPDATESTATE_IDLE:
rc = ManagedDeviceClient.RESPONSECODE_BAD_REQUEST
msg = "Cannot download as the device is not in idle state"
thread = threading.Thread(target=self.respondDeviceAction, args=(reqId, rc, msg), name="respondDeviceAction")
thread.start()
if self.firmwereActionCallback:
self.firmwereActionCallback("download", self.__firmwareUpdate)
def __onFirmwereCancel(self, client, userdata, pahoMessage):
paho_payload = pahoMessage.payload.decode("utf-8")
self.logger.info(
"Message received on topic :%s with payload %s", ManagedDeviceClient.DM_CANCEL_OBSERVE_TOPIC, paho_payload
)
data = json.loads(paho_payload)
reqId = data["reqId"]
thread = threading.Thread(target=self.respondDeviceAction, args=(reqId, 200, ""), name="respondDeviceAction")
thread.start()
def __onFirmwereObserve(self, client, userdata, pahoMessage):
paho_payload = pahoMessage.payload.decode("utf-8")
self.logger.info(
"Message received on topic :%s with payload %s", ManagedDeviceClient.DM_OBSERVE_TOPIC, paho_payload
)
data = json.loads(paho_payload)
reqId = data["reqId"]
        # TODO: Proper validation for fields in payload
thread = threading.Thread(target=self.respondDeviceAction, args=(reqId, 200, ""), name="respondDeviceAction")
thread.start()
def __onUpdatedDevice(self, client, userdata, pahoMessage):
paho_payload = pahoMessage.payload.decode("utf-8")
self.logger.info(
"Message received on topic :%s with payload %s", ManagedDeviceClient.DM_UPDATE_TOPIC, paho_payload
)
data = json.loads(paho_payload)
if "reqId" in data:
reqId = data["reqId"]
d = data["d"]
value = None
for obj in d["fields"]:
if "field" in obj:
if obj["field"] == "mgmt.firmware":
value = obj["value"]
if value is not None:
self.__firmwareUpdate = DeviceFirmware(
value["version"],
value["name"],
value["uri"],
value["verifier"],
value["state"],
value["updateStatus"],
value["updatedDateTime"],
)
thread = threading.Thread(
target=self.respondDeviceAction, args=(reqId, 204, ""), name="respondDeviceAction"
)
thread.start()
else:
d = data["d"]
value = None
for obj in d["fields"]:
if "field" in obj:
if obj["field"] == "metadata":
value = obj["value"]
if value is not None:
self.metadata = value
def setState(self, status):
notify = {"d": {"fields": [{"field": "mgmt.firmware", "value": {"state": status}}]}}
if self.__firmwareUpdate is not None:
self.__firmwareUpdate.state = status
self.logger.info("Publishing state Update with payload :%s", json.dumps(notify))
thread = threading.Thread(
target=self.client.publish, args=("iotdevice-1/notify", json.dumps(notify), 1, False), name="client.publish"
)
thread.start()
def setUpdateStatus(self, status):
notify = {
"d": {
"fields": [
{
"field": "mgmt.firmware",
"value": {"state": ManagedDeviceClient.UPDATESTATE_IDLE, "updateStatus": status},
}
]
}
}
if self.__firmwareUpdate is not None:
self.__firmwareUpdate.state = ManagedDeviceClient.UPDATESTATE_IDLE
self.__firmwareUpdate.updateStatus = status
self.logger.info("Publishing Update Status with payload :%s", json.dumps(notify))
thread = threading.Thread(
target=self.client.publish, args=("iotdevice-1/notify", json.dumps(notify), 1, False), name="client.publish"
)
thread.start()
def __onFirmwereUpdate(self, client, userdata, pahoMessage):
paho_payload = pahoMessage.payload.decode("utf-8")
self.logger.info(
"Message received on topic :%s with payload %s", ManagedDeviceClient.DM_FIRMWARE_UPDATE_TOPIC, paho_payload
)
data = json.loads(paho_payload)
reqId = data["reqId"]
rc = ManagedDeviceClient.RESPONSECODE_ACCEPTED
msg = ""
if self.__firmwareUpdate.state != ManagedDeviceClient.UPDATESTATE_DOWNLOADED:
rc = ManagedDeviceClient.RESPONSECODE_BAD_REQUEST
msg = "Firmware is still not successfully downloaded."
thread = threading.Thread(target=self.respondDeviceAction, args=(reqId, rc, msg), name="respondDeviceAction")
thread.start()
if self.firmwereActionCallback:
self.firmwereActionCallback("update", self.__firmwareUpdate)
def __onDMEActionRequest(self, client, userdata, pahoMessage):
data = json.loads(pahoMessage.payload.decode("utf-8"))
self.logger.info("Message received on topic :%s with payload %s", ManagedDeviceClient.DME_ACTION_TOPIC, data)
reqId = data["reqId"]
if self.dmeActionCallback:
if self.dmeActionCallback(pahoMessage.topic, data, reqId):
msg = "DME Action successfully completed from Callback"
thread = threading.Thread(
target=self.respondDeviceAction, args=(reqId, 200, msg), name="respondDeviceAction"
)
thread.start()
else:
msg = "Unexpected device error"
thread = threading.Thread(
target=self.respondDeviceAction, args=(reqId, 500, msg), name="respondDeviceAction"
)
thread.start()
else:
thread = threading.Thread(
target=self.respondDeviceAction,
args=(reqId, 501, "Operation not implemented"),
name="respondDeviceAction",
)
thread.start()
|
epl-1.0
|
145ea361c0c78a021c0675ca9b269ad6
| 40.774691
| 137
| 0.590137
| 4.221772
| false
| false
| false
| false
|
ibm-watson-iot/iot-python
|
src/wiotp/sdk/gateway/messages.py
|
2
|
2136
|
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import re
from wiotp.sdk import MissingMessageDecoderException, InvalidEventException
COMMAND_RE = re.compile("iot-2/type/(.+)/id/(.+)/cmd/(.+)/fmt/(.+)")
class Command:
def __init__(self, pahoMessage, messageEncoderModules):
result = COMMAND_RE.match(pahoMessage.topic)
if result:
self.typeId = result.group(1)
self.deviceId = result.group(2)
self.commandId = result.group(3)
self.format = result.group(4)
if self.format in messageEncoderModules:
message = messageEncoderModules[self.format].decode(pahoMessage)
self.timestamp = message.timestamp
self.data = message.data
else:
raise MissingMessageDecoderException(self.format)
else:
raise InvalidEventException("Received command on invalid topic: %s" % (pahoMessage.topic))
NOTIFY_RE = re.compile("iot-2/type/(.+)/id/(.+)/notify")
class Notification:
def __init__(self, pahoMessage, messageEncoderModules):
result = NOTIFY_RE.match(pahoMessage.topic)
if result:
self.typeId = result.group(1)
self.deviceId = result.group(2)
self.format = "json"
if self.format in messageEncoderModules:
message = messageEncoderModules[self.format].decode(pahoMessage)
self.timestamp = message.timestamp
self.data = message.data
else:
raise MissingMessageDecoderException(self.format)
else:
raise InvalidEventException("Received notification on invalid topic: %s" % (pahoMessage.topic))
|
epl-1.0
|
0945e4c78d6be15cc3d53a501d7b2024
| 39.301887
| 107
| 0.594569
| 4.377049
| false
| false
| false
| false
|
mbj4668/pyang
|
pyang/transforms/edit.py
|
1
|
12718
|
"""Edit transform plugin
This plugin currently has quite limited functionality. Only some specific
top-level items can be edited, and only existing statements are edited.
"""
import copy
import optparse
import re
import sys
from pyang import error
from pyang import plugin
from pyang import statements
plugin_name = 'edit'
# noinspection PyUnusedLocal
def check_date(option, opt, value):
if not re.match(r'^\d{4}-\d{2}-\d{2}$', value):
raise optparse.OptionValueError(
'option %s: invalid yyyy-mm-dd date: %s' % (opt, value))
return value
class EditOption(optparse.Option):
TYPES = optparse.Option.TYPES + ('date',)
TYPE_CHECKER = copy.copy(optparse.Option.TYPE_CHECKER)
TYPE_CHECKER['date'] = check_date
def pyang_plugin_init():
plugin.register_plugin(EditPlugin())
class EditPlugin(plugin.PyangPlugin):
def add_opts(self, optparser):
optlist = [
# set YANG version (this does nothing if there's no yang-version
# statement)
EditOption("--edit-yang-version", dest="edit_yang_version",
metavar="VERSION",
help="Set YANG version to the supplied value"),
# set namespace (this does nothing if there's no namespace
# statement)
EditOption("--edit-namespace", dest="edit_namespace",
metavar="NAMESPACE",
help="Set YANG namespace to the supplied value"),
# set imported/included module/submodule revision dates
EditOption("--edit-update-import-dates",
dest="edit_update_import_dates", default=False,
action="store_true",
help="Set import/include revision-date "
"statements to match imported/included "
"modules/submodules"),
EditOption("--edit-delete-import-dates",
dest="edit_delete_import_dates", default=False,
action="store_true",
help="Delete import/include revision-date "
"statements"),
# set meta info (these do nothing if there's no corresponding
# metadata statement)
EditOption("--edit-organization", dest="edit_organization",
metavar="ORGANIZATION",
help="Set module/submodule organization "
"to the supplied value"),
EditOption("--edit-contact", dest="edit_contact",
metavar="CONTACT", help="Set module/submodule contact "
"to the supplied value"),
EditOption("--edit-description", dest="edit_description",
metavar="DESCRIPTION",
help="Set module/submodule description "
"to the supplied value"),
# set revision info (these do nothing if there's no revision
# statement)
EditOption("--edit-delete-revisions-after",
dest="edit_delete_revisions_after", type="date",
metavar="PREVDATE",
help="Delete any revisions after "
"the supplied yyyy-mm-dd"),
EditOption("--edit-revision-date", dest="edit_revision_date",
type="date", metavar="DATE",
help="Set most recent revision date "
"to the supplied yyyy-mm-dd"),
EditOption("--edit-revision-description",
dest="edit_revision_description", metavar="DESCRIPTION",
help="Set most recent revision description "
"to the supplied value"),
EditOption("--edit-revision-reference",
dest="edit_revision_reference", metavar="REFERENCE",
help="Set most recent revision reference "
"to the supplied value")
]
g = optparser.add_option_group("Edit transform specific options")
g.add_options(optlist)
def add_transform(self, xforms):
xforms[plugin_name] = self
def transform(self, ctx, modules):
edit_tree(ctx, modules)
def edit_tree(ctx, modules):
def optval(key):
dest = ('%s-%s' % (plugin_name, key)).replace('-', '_')
return getattr(ctx.opts, dest, None)
for module in modules:
for keyword in ['yang-version', 'namespace']:
arg = optval(keyword)
if arg is not None:
update_or_add_stmt(module, keyword, arg)
substmts = []
revision_done = False
for stmt in module.substmts:
replstmts = None
if stmt.keyword in ['import', 'include']:
# XXX should check that these options aren't both set
if ctx.opts.edit_update_import_dates:
update_import_date(ctx, stmt)
elif ctx.opts.edit_delete_import_dates:
delete_import_date(ctx, stmt)
elif stmt.keyword in ['organization', 'contact', 'description']:
arg = optval(stmt.keyword)
if arg is not None:
set_meta_details(ctx, stmt, arg)
elif stmt.keyword == 'revision' and not revision_done:
allrevs = module.search('revision')
lastrev = stmt == allrevs[-1]
replstmts, revision_done = set_revision_details(ctx, stmt,
lastrev)
substmts += [stmt] if replstmts is None else replstmts
# XXX should we tidy up any of the deleted statements?
module.substmts = substmts
def update_import_date(ctx, stmt):
imprev = stmt.search_one('revision-date')
imprevdate = imprev.arg if imprev else None
impmod = ctx.get_module(stmt.arg, imprevdate)
impmodrev = impmod.search_one('revision') if impmod else None
impmodrevdate = impmodrev.arg if impmodrev else None
if not imprev or impmodrevdate > imprevdate:
update_or_add_stmt(stmt, 'revision-date', impmodrevdate)
# noinspection PyUnusedLocal
def delete_import_date(ctx, stmt):
imprev = stmt.search_one('revision-date')
if imprev:
delete_stmt(stmt, imprev)
# noinspection PyUnusedLocal
def set_meta_details(ctx, stmt, arg):
(newarg, ignore) = get_arg_value(arg, stmt.arg)
if newarg is not None:
stmt.arg = newarg
# XXX note that this logic relies on there already being at least one
# revision statement; --lint checks this so it should be OK
def set_revision_details(ctx, stmt, lastrev):
revision_done = False
# relevant options
opts = {
'olddate': ctx.opts.edit_delete_revisions_after,
'newdate': ctx.opts.edit_revision_date,
'description': ctx.opts.edit_revision_description,
'reference': ctx.opts.edit_revision_reference
}
# the logic is quite tricky; here's what we want to achieve:
# * 'olddate' is the date of the oldest revision to be retained; if not
# supplied, any existing revisions are deleted
# * if 'newdate' is supplied, it's the date of the next published
# revision and is to be inserted at the start of any remaining
# revisions
# * reuse rather than delete the oldest revision statement, purely in
# order to retain any blank lines after it
# default action is to do nothing
action = ''
#sys.stderr.write('revision %s (lastrev %s)\n' % (stmt.arg, lastrev))
# only adjust revisions if either olddate or newdate is supplied
olddate = opts.get('olddate', None)
newdate = opts.get('newdate', None)
if olddate is not None or newdate is not None:
# determine whether to delete this old revision
if olddate is None or stmt.arg > olddate:
action = 'delete'
#sys.stderr.write('-> delete (olddate %s)\n' % olddate)
# determine whether to insert the new revision
if newdate is not None and (action != 'delete' or lastrev):
action = 'replace' if action == 'delete' else 'insert'
#sys.stderr.write('-> %s (newdate %s)\n' % (action, newdate))
# if deleting, return an empty list
replstmts = None
if action == 'delete':
replstmts = []
# replace and insert logic is quite similar:
# * if replacing, modify this statement and return a list containing
# only it
# * if inserting, create a new statement and return a list containing
# the new and the original statement
elif action == 'replace' or action == 'insert':
if action == 'replace':
revstmt = stmt
revstmt.arg = newdate
else:
revstmt = statements.new_statement(stmt.top, stmt.parent, None,
'revision', newdate)
other_keywords = set(opts.keys()) - {'olddate', 'newdate'}
for keyword in other_keywords:
update_or_add_stmt(revstmt, keyword, opts[keyword])
if action == 'replace':
replstmts = [revstmt]
else:
replstmts = [revstmt, stmt]
revision_done = True
#sys.stderr.write(
# '= %s\n' % ([s.arg for s in replstmts] if replstmts else None))
return replstmts, revision_done
def get_arg_value(arg, currarg=None):
if arg is None or arg[0] not in ['%', '@']:
return arg, True
else:
replace = False
try:
argval = ''
specs = arg.split('+')
for spec in specs:
if argval != '':
argval += '\n\n'
if spec[0] not in ['%', '@']:
argval += spec
elif spec[0] == '%':
if spec == '%SUMMARY':
summary = get_arg_summary(currarg)
if summary:
argval += summary
elif spec.startswith('%SUBST/'):
(ignore, old, new) = spec.split('/')
if currarg is None:
if argval == '':
argval = None
else:
argval = currarg.replace(old, new)
replace = True
elif spec == '%DELETE':
argval = ''
replace = True
else:
argval += spec
elif spec[0] == '@':
argval += open(spec[1:], 'r').read().rstrip()
return argval, replace
except IOError as e:
raise error.EmitError(str(e))
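# Illustrative sketch (not executed) of the spec mini-language accepted by
# get_arg_value(); file names are hypothetical:
#
#   "plain text"      -> used as-is
#   "%SUMMARY"        -> summary of the current argument (or "TBD")
#   "%SUBST/foo/bar"  -> current argument with "foo" replaced by "bar"
#   "%DELETE"         -> delete the current value
#   "@notes.txt"      -> contents of the file notes.txt
#
# Specs may be joined with "+", e.g. "%SUMMARY+@notes.txt"; the parts are
# separated by blank lines in the resulting argument.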
def get_arg_summary(arg):
lines = arg.splitlines()
summary = ''
prev = ''
discard_prev = False
for line in lines:
if line.strip().startswith('Copyright '):
if prev.strip() == '':
discard_prev = True
break
if prev != '':
summary += prev
prev = ''
if summary != '':
prev += '\n'
prev += line
if prev and not discard_prev:
summary += prev
return summary if summary else 'TBD'
# XXX should insert in canonical order; currently (apart from the hack noted
# below) just appending; should look into doing the same as yang.py, which
# does: substmts = grammar.sort_canonical(stmt.keyword, stmt.substmts)
def update_or_add_stmt(stmt, keyword, arg, index=None):
child = stmt.search_one(keyword)
currarg = child.arg if child else None
(argval, replace) = get_arg_value(arg, currarg)
if argval is None:
child = None
elif child:
if not replace and child.arg and child.arg != argval and child.arg \
!= 'TBD':
sys.stderr.write('%s: not replacing existing %s %r with %r\n' % (
child.pos, keyword, child.arg, argval))
else:
child.arg = argval
else:
child = statements.new_statement(stmt.top, stmt, None, keyword, argval)
if index is None:
index = len(stmt.substmts)
# XXX this hack ensures that 'reference' is always last
if index > 0 and stmt.substmts[index - 1].keyword == 'reference':
index -= 1
stmt.substmts.insert(index, child)
return child
def delete_stmt(parent, stmt):
if stmt in parent.substmts:
idx = parent.substmts.index(stmt)
del parent.substmts[idx]
del stmt
|
isc
|
c7fe489f489e7470546c470f3f6a8e4f
| 36.296188
| 79
| 0.551109
| 4.419041
| false
| false
| false
| false
|
mbj4668/pyang
|
pyang/syntax.py
|
1
|
15064
|
"""Description of YANG & YIN syntax."""
import os
import re
import shlex
import sys
import datetime
### Regular expressions - constraints on arguments
# keywords and identifiers
identifier = r"[_A-Za-z][._\-A-Za-z0-9]*"
prefix = identifier
keyword = '((' + prefix + '):)?(' + identifier + ')'
comment = r'(/\*([^*]|[\r\n\s]|(\*+([^*/]|[\r\n\s])))*\*+/)|(//.*)|(/\*.*)'
# no group version of keyword
keyword_ng = '(?:(' + prefix + '):)?(?:' + identifier + ')'
re_keyword = re.compile(keyword)
re_keyword_start = re.compile('^' + keyword)
re_comment = re.compile(comment)
pos_integer = r"[1-9][0-9]*"
nonneg_integer = r"(0|([1-9][0-9]*))"
integer_ = r"[+-]?" + nonneg_integer
decimal_ = integer_ + r"(\.[0-9]+)?"
length_str = r'((min|max|[0-9]+)\s*' \
r'(\.\.\s*' \
r'(min|max|[0-9]+)\s*)?)'
length_expr = length_str + r'(\|\s*' + length_str + r')*'
re_length_part = re.compile(length_str)
range_str = r'((min|max|((\+|\-)?[0-9]+(\.[0-9]+)?))\s*' \
r'(\.\.\s*' \
r'(min|max|(\+|\-)?[0-9]+(\.[0-9]+)?)\s*)?)'
range_expr = range_str + r'(\|\s*' + range_str + r')*'
re_range_part = re.compile(range_str)
re_identifier = re.compile("^" + identifier + "$")
# path and unique
node_id = keyword_ng
rel_path_keyexpr = r"(\.\./)+(" + node_id + "/)*" + node_id
path_key_expr = r"(current\s*\(\s*\)/" + rel_path_keyexpr + ")"
path_equality_expr = node_id + r"\s*=\s*" + path_key_expr
path_predicate = r"\s*\[\s*" + path_equality_expr + r"\s*\]\s*"
absolute_path_arg = "(?:/" + node_id + "(" + path_predicate + ")*)+"
descendant_path_arg = node_id + "(" + path_predicate + ")*" + \
"(?:" + absolute_path_arg + ")?"
relative_path_arg = r"(\.\./)*" + descendant_path_arg
deref_path_arg = r"deref\s*\(\s*(?:" + relative_path_arg + \
r")\s*\)/\.\./" + relative_path_arg
path_arg = "(" + absolute_path_arg + "|" + relative_path_arg + "|" + \
deref_path_arg + ")"
absolute_schema_nodeid = "(/" + node_id + ")+"
descendant_schema_nodeid = node_id + "(" + absolute_schema_nodeid + ")?"
schema_nodeid = "("+absolute_schema_nodeid+"|"+descendant_schema_nodeid+")"
unique_arg = descendant_schema_nodeid + \
r"(\s+" + descendant_schema_nodeid + r")*"
key_arg = node_id + r"(\s+" + node_id + r")*"
re_schema_node_id_part = re.compile('/' + keyword)
# URI - RFC 3986, Appendix A
scheme = "[A-Za-z][-+.A-Za-z0-9]*"
unreserved = "[-._~A-Za-z0-9]"
pct_encoded = "%[0-9A-F]{2}"
sub_delims = "[!$&'()*+,;=]"
pchar = ("(" + unreserved + "|" + pct_encoded + "|" +
sub_delims + "|[:@])")
segment = pchar + "*"
segment_nz = pchar + "+"
userinfo = ("(" + unreserved + "|" + pct_encoded + "|" +
sub_delims + "|:)*")
dec_octet = "([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
ipv4address = "(" + dec_octet + r"\.){3}" + dec_octet
h16 = "[0-9A-F]{1,4}"
ls32 = "(" + h16 + ":" + h16 + "|" + ipv4address + ")"
ipv6address = (
"((" + h16 + ":){6}" + ls32 +
"|::(" + h16 + ":){5}" + ls32 +
"|(" + h16 + ")?::(" + h16 + ":){4}" + ls32 +
"|((" + h16 + ":)?" + h16 + ")?::(" + h16 + ":){3}" + ls32 +
"|((" + h16 + ":){,2}" + h16 + ")?::(" + h16 + ":){2}" + ls32 +
"|((" + h16 + ":){,3}" + h16 + ")?::" + h16 + ":" + ls32 +
"|((" + h16 + ":){,4}" + h16 + ")?::" + ls32 +
"|((" + h16 + ":){,5}" + h16 + ")?::" + h16 +
"|((" + h16 + ":){,6}" + h16 + ")?::)")
ipvfuture = r"v[0-9A-F]+\.(" + unreserved + "|" + sub_delims + "|:)+"
ip_literal = r"\[(" + ipv6address + "|" + ipvfuture + r")\]"
reg_name = "(" + unreserved + "|" + pct_encoded + "|" + sub_delims + ")*"
host = "(" + ip_literal + "|" + ipv4address + "|" + reg_name + ")"
port = "[0-9]*"
authority = "(" + userinfo + "@)?" + host + "(:" + port + ")?"
path_abempty = "(/" + segment + ")*"
path_absolute = "/(" + segment_nz + "(/" + segment + ")*)?"
path_rootless = segment_nz + "(/" + segment + ")*"
path_empty = pchar + "{0}"
hier_part = ("(" + "//" + authority + path_abempty + "|" +
path_absolute + "|" + path_rootless + "|" + path_empty + ")")
query = "(" + pchar + "|[/?])*"
fragment = query
uri = (scheme + ":" + hier_part + r"(\?" + query + ")?" +
"(#" + fragment + ")?")
# Date
date = r"([1-2][0-9]{3})-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])"
re_nonneg_integer = re.compile("^" + nonneg_integer + "$")
re_integer = re.compile("^" + integer_ + "$")
re_decimal = re.compile("^" + decimal_ + "$")
re_uri = re.compile("^" + uri + "$")
re_boolean = re.compile(r"^(true|false)$")
re_version = re.compile(r"^(1|(1\.1))$")
re_date = re.compile("^" + date +"$")
re_status = re.compile(r"^(current|obsolete|deprecated)$")
re_key = re.compile("^" + key_arg + "$")
re_length = re.compile("^" + length_expr + "$")
re_range = re.compile("^" + range_expr + "$")
re_pos_integer = re.compile(r"^(unbounded|" + pos_integer + r")$")
re_ordered_by = re.compile(r"^(user|system)$")
re_modifier = re.compile(r"^(invert-match)$")
re_node_id = re.compile("^" + node_id + "$")
re_path = re.compile("^" + path_arg + "$")
re_absolute_path = re.compile("^" + absolute_path_arg + "$")
re_unique = re.compile("^" + unique_arg + "$")
re_schema_nodeid = re.compile("^" + schema_nodeid + "$")
re_absolute_schema_nodeid = re.compile("^" + absolute_schema_nodeid + "$")
re_descendant_schema_nodeid = re.compile("^" + descendant_schema_nodeid + "$")
re_deviate = re.compile(r"^(add|delete|replace|not-supported)$")
# Not part of YANG syntax per se but useful for pyang in several places
re_filename = re.compile(
r"^(?:.*" + re.escape(os.sep) + r")?" + # ignore all before os.sep
r"([^@]*?)" + # putative module name
r"(?:@([^.]*?))?" + # putative revision
r"(?:\.yang|\.yin)*" + # [email protected] ?
r"\.(yang|yin)$") # actual final extension
arg_type_map = {
"identifier": lambda s: re_identifier.search(s) is not None,
"non-negative-integer": lambda s: re_nonneg_integer.search(s) is not None,
"integer": lambda s: re_integer.search(s) is not None,
"uri": lambda s: re_uri.search(s) is not None,
"boolean": lambda s: re_boolean.search(s) is not None,
"version": lambda s: re_version.search(s) is not None,
"date": lambda s: chk_date_arg(s),
"status-arg": lambda s: re_status.search(s) is not None,
"key-arg": lambda s: re_key.search(s) is not None,
"length-arg": lambda s: re_length.search(s) is not None,
"range-arg": lambda s: re_range.search(s) is not None,
"max-value": lambda s: re_pos_integer.search(s) is not None,
"ordered-by-arg": lambda s: re_ordered_by.search(s) is not None,
"modifier-arg": lambda s: re_modifier.search(s) is not None,
"identifier-ref": lambda s: re_node_id.search(s) is not None,
"path-arg": lambda s: re_path.search(s) is not None,
"absolute-path-arg": lambda s: re_absolute_path.search(s) is not None,
"unique-arg": lambda s: re_unique.search(s) is not None,
"absolute-schema-nodeid": lambda s: \
re_absolute_schema_nodeid.search(s) is not None,
"descendant-schema-nodeid": lambda s: \
re_descendant_schema_nodeid.search(s) is not None,
"schema-nodeid": lambda s: \
re_schema_nodeid.search(s) is not None,
"enum-arg": lambda s: chk_enum_arg(s),
"fraction-digits-arg": lambda s: chk_fraction_digits_arg(s),
"if-feature-expr": lambda s: chk_if_feature_expr(s),
"deviate-arg": lambda s: re_deviate.search(s) is not None,
"_comment": lambda s: re_comment.search(s) is not None,
}
"""Argument type definitions.
Regular expressions for all argument types except plain string that
are checked directly by the parser.
"""
def chk_date_arg(s):
"""Checks if the string `s` is a valid date string.
Return True or False."""
match = re_date.match(s)
if match is None:
return False
comp = match.groups()
try:
datetime.date(int(comp[0]), int(comp[1]), int(comp[2]))
return True
except ValueError:
return False
def chk_enum_arg(s):
"""Checks if the string `s` is a valid enum string.
Return True or False."""
if len(s) == 0 or s[0].isspace() or s[-1].isspace():
return False
else:
return True
def chk_fraction_digits_arg(s):
"""Checks if the string `s` is a valid fraction-digits argument.
Return True or False."""
try:
v = int(s)
if v >= 1 and v <= 18:
return True
else:
return False
except ValueError:
return False
def chk_if_feature_expr(s):
return parse_if_feature_expr(s) is not None
# if-feature-expr = "(" if-feature-expr ")" /
# if-feature-expr sep boolean-operator sep
# if-feature-expr /
# not-keyword sep if-feature-expr /
# identifier-ref-arg
#
# Rewrite to:
# x = y ("and"/"or" y)*
# y = "not" x /
# "(" x ")"
# identifier
#
# Expr :: ('not', Expr, None)
# | ('and'/'or', Expr, Expr)
# | Identifier
def parse_if_feature_expr(s):
try:
# Encoding to ascii works for valid if-feature-exprs, since all
# parts are YANG identifiers (or the boolean keywords).
# The reason for this fix is that in Python < 2.7.3, shlex would return
# erroneous tokens if a unicode string was passed.
# Also, shlex uses cStringIO internally which doesn't handle unicode
# characters outside the ascii range anyway.
if sys.version < '3':
sx = shlex.shlex(s.encode("ascii"))
else:
sx = shlex.shlex(s)
except UnicodeEncodeError:
return None
sx.wordchars += ":-" # need to handle prefixes and '-' in the name
operators = [None]
operands = []
precedence = {'not':3, 'and':2, 'or':1, None:0}
def x():
y()
tok = sx.get_token()
while tok in ('and', 'or'):
push_operator(tok)
y()
tok = sx.get_token()
sx.push_token(tok)
while operators[-1] is not None:
pop_operator()
def y():
tok = sx.get_token()
if tok == 'not':
push_operator(tok)
x()
elif tok == '(':
operators.append(None)
x()
tok = sx.get_token()
if tok != ')':
raise ValueError
operators.pop()
elif is_identifier(tok):
operands.append(tok)
else:
raise ValueError
def push_operator(op):
while op_gt(operators[-1], op):
pop_operator()
operators.append(op)
def pop_operator():
op = operators.pop()
if op == 'not':
operands.append((op, operands.pop(), None))
else:
operands.append((op, operands.pop(), operands.pop()))
def op_gt(op1, op2):
return precedence[op1] > precedence[op2]
def is_identifier(tok):
return re_node_id.search(tok) is not None
try:
x()
if sx.get_token() != '':
raise ValueError
return operands[-1]
except ValueError:
return None
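# Illustrative sketch (not part of the original module): the parser returns the
# nested Expr tuples described in the comment above, or None on a syntax error.
# The exact nesting follows the precedence handling in x()/y(); for example:
#   parse_if_feature_expr("foo")             -> 'foo'
#   parse_if_feature_expr("not foo")         -> ('not', 'foo', None)
#   parse_if_feature_expr("foo and not bar") -> ('and', ('not', 'bar', None), 'foo')
#   parse_if_feature_expr("foo and")         -> None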
def add_arg_type(arg_type, regexp):
"""Add a new arg_type to the map.
Used by extension plugins to register their own argument types."""
arg_type_map[arg_type] = regexp
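# Illustrative sketch (hypothetical plugin code, not part of pyang itself): an
# extension plugin could register its own argument type with a checker callable,
# matching the lambda style used in arg_type_map above. The name "my-hex-arg"
# and the pattern are assumptions for the example only.
#
#   my_re_hex = re.compile(r"^[0-9a-fA-F]+$")
#   add_arg_type("my-hex-arg", lambda s: my_re_hex.search(s) is not None)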
# keyword argument-name yin-element
yin_map = \
{'action': ('name', False),
'anydata': ('name', False),
'anyxml': ('name', False),
'argument': ('name', False),
'augment': ('target-node', False),
'base': ('name', False),
'belongs-to': ('module', False),
'bit': ('name', False),
'case': ('name', False),
'choice': ('name', False),
'config': ('value', False),
'contact': ('text', True),
'container': ('name', False),
'default': ('value', False),
'description': ('text', True),
'deviate': ('value', False),
'deviation': ('target-node', False),
'enum': ('name', False),
'error-app-tag': ('value', False),
'error-message': ('value', True),
'extension': ('name', False),
'feature': ('name', False),
'fraction-digits': ('value', False),
'grouping': ('name', False),
'identity': ('name', False),
'if-feature': ('name', False),
'import': ('module', False),
'include': ('module', False),
'input': (None, None),
'key': ('value', False),
'leaf': ('name', False),
'leaf-list': ('name', False),
'length': ('value', False),
'list': ('name', False),
'mandatory': ('value', False),
'max-elements': ('value', False),
'min-elements': ('value', False),
'modifier': ('value', False),
'module': ('name', False),
'must': ('condition', False),
'namespace': ('uri', False),
'notification': ('name', False),
'ordered-by': ('value', False),
'organization': ('text', True),
'output': (None, None),
'path': ('value', False),
'pattern': ('value', False),
'position': ('value', False),
'presence': ('value', False),
'prefix': ('value', False),
'range': ('value', False),
'reference': ('text', True),
'refine': ('target-node', False),
'require-instance': ('value', False),
'revision': ('date', False),
'revision-date': ('date', False),
'rpc': ('name', False),
'status': ('value', False),
'submodule': ('name', False),
'type': ('name', False),
'typedef': ('name', False),
'unique': ('tag', False),
'units': ('name', False),
'uses': ('name', False),
'value': ('value', False),
'when': ('condition', False),
'yang-version': ('value', False),
'yin-element': ('value', False),
}
"""Mapping of statements to the YIN representation of their arguments.
The values are pairs whose first component specifies whether the
argument is stored in a subelement and the second component is the
name of the attribute or subelement carrying the argument. See YANG
specification.
"""
|
isc
|
433a69ad9ffd4ba16f37c8f4cb504c13
| 37.925065
| 79
| 0.497411
| 3.225696
| false
| false
| false
| false
|
mbj4668/pyang
|
pyang/translators/yin.py
|
1
|
6251
|
"""YIN output plugin"""
from xml.sax.saxutils import quoteattr
from xml.sax.saxutils import escape
import optparse
import re
from .. import plugin
from .. import util
from .. import grammar
from .. import syntax
from .. import statements
yin_namespace = "urn:ietf:params:xml:ns:yang:yin:1"
def pyang_plugin_init():
plugin.register_plugin(YINPlugin())
class YINPlugin(plugin.PyangPlugin):
def add_opts(self, optparser):
optlist = [
optparse.make_option("--yin-canonical",
dest="yin_canonical",
action="store_true",
help="Print in canonical order"),
optparse.make_option("--yin-pretty-strings",
dest="yin_pretty_strings",
action="store_true",
help="Pretty print strings"),
]
g = optparser.add_option_group("YIN output specific options")
g.add_options(optlist)
def add_output_format(self, fmts):
fmts['yin'] = self
def emit(self, ctx, modules, fd):
module = modules[0]
emit_yin(ctx, module, fd)
def emit_yin(ctx, module, fd):
fd.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fd.write('<%s name="%s"\n' % (module.keyword, module.arg))
fd.write(' ' * len(module.keyword) + ' xmlns="%s"' % yin_namespace)
prefix = module.search_one('prefix')
if prefix is not None:
namespace = module.search_one('namespace')
fd.write('\n')
fd.write(' ' * len(module.keyword))
fd.write(' xmlns:' + prefix.arg + '=' +
quoteattr(namespace.arg))
else:
belongs_to = module.search_one('belongs-to')
if belongs_to is not None:
prefix = belongs_to.search_one('prefix')
if prefix is not None:
# read the parent module in order to find the namespace uri
res = ctx.read_module(belongs_to.arg, extra={'no_include':True})
if res is not None:
namespace = res.search_one('namespace')
if namespace is None or namespace.arg is None:
pass
else:
# success - namespace found
fd.write('\n')
fd.write(' ' * len(module.keyword))
fd.write(' xmlns:' + prefix.arg + '=' +
quoteattr(namespace.arg))
for imp in module.search('import'):
prefix = imp.search_one('prefix')
if prefix is not None:
rev = None
r = imp.search_one('revision-date')
if r is not None:
rev = r.arg
mod = statements.modulename_to_module(module, imp.arg, rev)
if mod is not None:
ns = mod.search_one('namespace')
if ns is not None:
fd.write('\n')
fd.write(' ' * len(module.keyword))
fd.write(' xmlns:' + prefix.arg + '=' +
quoteattr(ns.arg))
fd.write('>\n')
if ctx.opts.yin_canonical:
substmts = grammar.sort_canonical(module.keyword, module.substmts)
else:
substmts = module.substmts
for s in substmts:
emit_stmt(ctx, module, s, fd, ' ', ' ')
fd.write('</%s>\n' % module.keyword)
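# Illustrative sketch (hypothetical module, output shown only roughly): for a
# module "m { namespace "urn:m"; prefix m; }" the emitted YIN would look
# approximately like
#   <module name="m"
#           xmlns="urn:ietf:params:xml:ns:yang:yin:1"
#           xmlns:m="urn:m">
#     <namespace uri="urn:m"/>
#     <prefix value="m"/>
#   </module>
# with exact whitespace depending on the indent arguments passed to emit_stmt.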
def emit_stmt(ctx, module, stmt, fd, indent, indentstep):
if util.is_prefixed(stmt.raw_keyword):
# this is an extension. need to find its definition
(prefix, identifier) = stmt.raw_keyword
tag = prefix + ':' + identifier
if stmt.i_extension is not None:
ext_arg = stmt.i_extension.search_one('argument')
if ext_arg is not None:
yin_element = ext_arg.search_one('yin-element')
if yin_element is not None and yin_element.arg == 'true':
argname = prefix + ':' + ext_arg.arg
argiselem = True
else:
# explicit false or no yin-element given
argname = ext_arg.arg
argiselem = False
else:
argiselem = False
argname = None
else:
argiselem = False
argname = None
else:
(argname, argiselem) = syntax.yin_map[stmt.raw_keyword]
tag = stmt.raw_keyword
if argiselem is False or argname is None:
if argname is None:
attr = ''
else:
attr = ' ' + argname + '=' + quoteattr(stmt.arg)
if len(stmt.substmts) == 0:
fd.write(indent + '<' + tag + attr + '/>\n')
else:
fd.write(indent + '<' + tag + attr + '>\n')
for s in stmt.substmts:
emit_stmt(ctx, module, s, fd, indent + indentstep,
indentstep)
fd.write(indent + '</' + tag + '>\n')
else:
fd.write(indent + '<' + tag + '>\n')
if ctx.opts.yin_pretty_strings:
# since whitespace is significant in XML, the current
# code is strictly speaking incorrect. But w/o the whitespace,
# it looks too ugly.
fd.write(indent + indentstep + '<' + argname + '>\n')
fd.write(fmt_text(indent + indentstep + indentstep, stmt.arg))
fd.write('\n' + indent + indentstep + '</' + argname + '>\n')
else:
fd.write(indent + indentstep + '<' + argname + '>' + \
escape(stmt.arg) + \
'</' + argname + '>\n')
if ctx.opts.yin_canonical:
substmts = grammar.sort_canonical(stmt.keyword, stmt.substmts)
else:
substmts = stmt.substmts
for s in substmts:
emit_stmt(ctx, module, s, fd, indent + indentstep, indentstep)
fd.write(indent + '</' + tag + '>\n')
def fmt_text(indent, data):
res = []
for line in re.split("(\n)", escape(data)):
if line == '':
continue
if line == '\n':
res.extend(line)
else:
res.extend(indent + line)
return ''.join(res)
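# Illustrative behaviour (added annotation, not original code):
#   fmt_text("  ", "a\nb") == "  a\n  b"
# i.e. every line of the (XML-escaped) text is prefixed with the given indent
# while the newlines themselves are preserved.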
|
isc
|
9a2d0ef23d5688834407018ea4306364
| 37.58642
| 80
| 0.503279
| 3.976463
| false
| false
| false
| false
|
rdegges/django-twilio
|
test_project/settings.py
|
1
|
6114
|
# -*- coding: utf-8 -*-
import sys
# Django settings for test_project project.
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import django
import packaging.version
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'j1wd@qqodn-r9h&o@0jj!uw^#pm5wcdu2^cdsax=hm+-mk705p'
# This is a temporary shim to allow the old style MIDDLEWARE_CLASSES to work
# We will forge a plan to remove at least the unsupported versions soon.
# Django 2.0 is the future, but 1.11 is still supported.
# This test_project though is simple enough that the restrictions are small.
if packaging.version.Version(django.__version__) < packaging.version.Version('2.0'):
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
else:
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
# django-twilio, of course!
'django_twilio',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# django-twilio account credentials. These fields are required to use the REST
# API (initiate outbound calls and SMS messages).
TWILIO_ACCOUNT_SID = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
TWILIO_AUTH_TOKEN = 'YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'
# The default callerid will be used for all outgoing phone calls and SMS
# messages if not explicitly specified. This number must be previously
# validated with twilio in order to work. See
# https://www.twilio.com/user/account/phone-numbers#
TWILIO_DEFAULT_CALLERID = 'NNNNNNNNNN'
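# Illustrative sketch (assumption, not part of the original test settings): in a
# real deployment the credentials above would normally come from the environment
# rather than being hard-coded, e.g.:
#
#   TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID', 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
#   TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN', 'YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY')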
|
unlicense
|
69e54dec409d3927e831977f003e6393
| 31.870968
| 84
| 0.697089
| 3.73945
| false
| false
| false
| false
|
mozilla-services/buildhub
|
jobs/tests/test_lambda_s3_event_functional.py
|
1
|
4418
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
import os
import json
import kinto_http
from decouple import config
from buildhub import lambda_s3_event
here = os.path.dirname(__file__)
server = config('SERVER_URL', default='http://localhost:8888/v1')
bid = 'build-hub'
cid = 'releases'
class LambdaTest(unittest.TestCase):
def setUp(self):
filename = os.path.join(here, 'data', 's3-event-simple.json')
self.event = json.load(open(filename, 'r'))
def test_load_into_kinto(self):
lambda_s3_event.lambda_handler(self.event, None)
rid = 'firefox_54-0_win64_fr'
client = kinto_http.Client(server_url=server)
record = client.get_record(bucket=bid, collection=cid, id=rid)['data']
record.pop('last_modified')
assert record == {
'id': 'firefox_54-0_win64_fr',
'source': {
'repository': (
'https://hg.mozilla.org/releases/mozilla-release'
),
'revision': 'e832ed037a3c23004be73178e546d240e57b6ee1',
'product': 'firefox',
'tree': 'releases/mozilla-release'
},
'download': {
'mimetype': 'application/msdos-windows',
'url': 'https://archive.mozilla.org/pub/firefox/releases/'
'54.0/win64/fr/Firefox Setup 54.0.exe',
'size': 51001024,
'date': '2017-08-08T17:06:52Z'
},
'target': {
'locale': 'fr',
'platform': 'win64',
'os': 'win',
'version': '54.0',
'channel': 'release'
},
'build': {
'as': 'ml64.exe',
'cc': (
'c:/builds/moz2_slave/m-rel-w64-00000000000000000000/'
'build/src/vs2015u3/VC/bin/amd64/cl.exe'
),
'cxx': (
'c:/builds/moz2_slave/m-rel-w64-00000000000000000000/'
'build/src/vs2015u3/VC/bin/amd64/cl.exe'
),
'date': '2017-06-08T10:58:25Z',
'host': 'x86_64-pc-mingw32',
'id': '20170608105825',
'number': 3,
'target': 'x86_64-pc-mingw32'
}
}
rid = 'firefox_nightly_2017-10-29-22-01-12_58-0a1_linux-i686_en-us'
record = client.get_record(bucket=bid, collection=cid, id=rid)['data']
record.pop('last_modified')
assert record == {
'build': {
'as': '$(CC)',
'cc': (
'/usr/bin/ccache '
'/builds/worker/workspace/build/src/gcc/bin/gcc -m32 '
'-march=pentium-m -std=gnu99'
),
'cxx': (
'/usr/bin/ccache '
'/builds/worker/workspace/build/src/gcc/bin/g++ -m32 '
'-march=pentium-m -std=gnu++11'
),
'date': '2017-10-29T22:01:12Z',
'host': 'i686-pc-linux-gnu',
'id': '20171029220112',
'target': 'i686-pc-linux-gnu',
},
'download': {
'date': '2017-10-29T17:06:52Z',
'mimetype': 'application/x-bzip2',
'size': 51001024,
'url': (
'https://archive.mozilla.org/pub/firefox/nightly/2017/10/'
'2017-10-29-22-01-12-mozilla-central/firefox-58.0a1.'
'en-US.linux-i686.tar.bz2'
)
},
'id': (
'firefox_nightly_2017-10-29-22-01-12_58-0a1_linux-i686_en-us'
),
'source': {
'product': 'firefox',
'repository': 'https://hg.mozilla.org/mozilla-central',
'revision': 'd3910b7628b8066d3f30d58b17b5824b05768854',
'tree': 'mozilla-central'
},
'target': {
'channel': 'nightly',
'locale': 'en-US',
'os': 'linux',
'platform': 'linux-i686',
'version': '58.0a1'
}
}
|
mpl-2.0
|
1517965c5c64a352e8b73259eff50045
| 34.629032
| 78
| 0.463105
| 3.571544
| false
| false
| false
| false
|
pikepdf/pikepdf
|
src/pikepdf/models/encryption.py
|
1
|
5651
|
# SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""For managing PDF encryption."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Any, NamedTuple, cast
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal # pragma: no cover
if TYPE_CHECKING:
from pikepdf._qpdf import EncryptionMethod
class Permissions(NamedTuple):
"""
Stores the user-level permissions for an encrypted PDF.
A compliant PDF reader/writer should enforce these restrictions on people
who have the user password and not the owner password. In practice, either
password is sufficient to decrypt all document contents. A person who has
the owner password should be allowed to modify the document in any way.
pikepdf does not enforce the restrictions in any way; it is up to application
developers to enforce them as they see fit.
Unencrypted PDFs implicitly have all permissions allowed. Permissions can
only be changed when a PDF is saved.
"""
accessibility: bool = True
"""Can users use screen readers and accessibility tools to read the PDF?"""
extract: bool = True
"""Can users extract contents?"""
modify_annotation: bool = True
"""Can users modify annotations?"""
modify_assembly: bool = False
"""Can users arrange document contents?"""
modify_form: bool = True
"""Can users fill out forms?"""
modify_other: bool = True
"""Can users modify the document?"""
print_lowres: bool = True
"""Can users print the document at low resolution?"""
print_highres: bool = True
"""Can users print the document at high resolution?"""
DEFAULT_PERMISSIONS = Permissions()
class EncryptionInfo:
"""
Reports encryption information for an encrypted PDF.
This information may not be changed, except when a PDF is saved.
This object is not used to specify the encryption settings to save
a PDF, due to non-overlapping information requirements.
"""
def __init__(self, encdict: dict[str, Any]):
"""
Initialize EncryptionInfo.
Generally pikepdf will initialize and return it.
Args:
encdict: Python dictionary containing encryption settings.
"""
self._encdict = encdict
@property
def R(self) -> int:
"""Revision number of the security handler."""
return int(self._encdict['R'])
@property
def V(self) -> int:
"""Version of PDF password algorithm."""
return int(self._encdict['V'])
@property
def P(self) -> int:
"""Return encoded permission bits.
See :meth:`Pdf.allow` instead.
"""
return int(self._encdict['P'])
@property
def stream_method(self) -> EncryptionMethod:
"""Encryption method used to encode streams."""
return cast('EncryptionMethod', self._encdict['stream'])
@property
def string_method(self) -> EncryptionMethod:
"""Encryption method used to encode strings."""
return cast('EncryptionMethod', self._encdict['string'])
@property
def file_method(self) -> EncryptionMethod:
"""Encryption method used to encode the whole file."""
return cast('EncryptionMethod', self._encdict['file'])
@property
def user_password(self) -> bytes:
"""If possible, return the user password.
The user password can only be retrieved when a PDF is opened
with the owner password and when older versions of the
encryption algorithm are used.
The password is always returned as ``bytes`` even if it has
a clear Unicode representation.
"""
return bytes(self._encdict['user_passwd'])
@property
def encryption_key(self) -> bytes:
"""Return the RC4 or AES encryption key used for this file."""
return bytes(self._encdict['encryption_key'])
@property
def bits(self) -> int:
"""Return the number of bits in the encryption algorithm.
e.g. if the algorithm is AES-256, this returns 256.
"""
return len(self._encdict['encryption_key']) * 8
class Encryption(NamedTuple):
"""Specify the encryption settings to apply when a PDF is saved."""
owner: str = ''
"""The owner password to use. This allows full control
of the file. If blank, the PDF will be encrypted and
present as "(SECURED)" in PDF viewers. If the owner password
is blank, the user password should be as well."""
user: str = ''
"""The user password to use. With this password, some
restrictions will be imposed by a typical PDF reader.
If blank, the PDF can be opened by anyone, but only modified
as allowed by the permissions in ``allow``."""
R: Literal[2, 3, 4, 5, 6] = 6
"""Select the security handler algorithm to use. Choose from:
``2``, ``3``, ``4`` or ``6``. By default, the highest version (``6``)
is selected. ``5`` is a deprecated algorithm that should
not be used."""
allow: Permissions = DEFAULT_PERMISSIONS
"""The permissions to set.
If omitted, all permissions are granted to the user."""
aes: bool = True
"""If True, request the AES algorithm. If False, use RC4.
If omitted, AES is selected whenever possible (R >= 4)."""
metadata: bool = True
"""If True, also encrypt the PDF metadata. If False,
metadata is not encrypted. Reading document metadata without
decryption may be desirable in some cases. Requires ``aes=True``.
If omitted, metadata is encrypted whenever possible."""
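# Illustrative usage sketch (assumption based on the classes above, not part of
# this module; filenames and passwords are hypothetical): an application would
# typically pass an Encryption instance to Pdf.save(), e.g.
#
#   import pikepdf
#   pdf = pikepdf.open("input.pdf")
#   pdf.save("encrypted.pdf",
#            encryption=pikepdf.Encryption(owner="owner-pw", user="user-pw",
#                                          allow=pikepdf.Permissions(extract=False)))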
|
mpl-2.0
|
21e03933294d545d53727875d93a860f
| 31.107955
| 81
| 0.660414
| 4.418296
| false
| false
| false
| false
|
mail-in-a-box/mailinabox
|
management/dns_update.py
|
1
|
46993
|
#!/usr/local/lib/mailinabox/env/bin/python
# Creates DNS zone files for all of the domains of all of the mail users
# and mail aliases and restarts nsd.
########################################################################
import sys, os, os.path, urllib.parse, datetime, re, hashlib, base64
import ipaddress
import rtyaml
import dns.resolver
from utils import shell, load_env_vars_from_file, safe_domain_name, sort_domains
from ssl_certificates import get_ssl_certificates, check_certificate
# From https://stackoverflow.com/questions/3026957/how-to-validate-a-domain-name-using-regex-php/16491074#16491074
# This regular expression matches domain names according to RFCs. It also accepts fqdns with a leading dot,
# underscores, as well as asterisks, which are allowed in domain names but not hostnames (i.e. allowed in
# DNS but not in URLs) and are common in certain record types such as DKIM.
DOMAIN_RE = r"^(?!\-)(?:[*][.])?(?:[a-zA-Z\d\-_]{0,62}[a-zA-Z\d_]\.){1,126}(?!\d+)[a-zA-Z\d_]{1,63}(\.?)$"
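# Illustrative matches (added annotation, hypothetical domain names):
#   re.match(DOMAIN_RE, "example.com")         # matches
#   re.match(DOMAIN_RE, "_dmarc.example.com")  # matches (leading underscore allowed)
#   re.match(DOMAIN_RE, "*.example.com")       # matches (wildcard allowed)
#   re.match(DOMAIN_RE, "-bad.example.com")    # no match (leading hyphen rejected)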
def get_dns_domains(env):
# Add all domain names in use by email users and mail aliases, any
# domains we serve web for (except www redirects because that would
# lead to infinite recursion here) and ensure PRIMARY_HOSTNAME is in the list.
from mailconfig import get_mail_domains
from web_update import get_web_domains
domains = set()
domains |= set(get_mail_domains(env))
domains |= set(get_web_domains(env, include_www_redirects=False))
domains.add(env['PRIMARY_HOSTNAME'])
return domains
def get_dns_zones(env):
# What domains should we create DNS zones for? Never create a zone for
# a domain & a subdomain of that domain.
domains = get_dns_domains(env)
# Exclude domains that are subdomains of other domains we know. Proceed
# by looking at shorter domains first.
zone_domains = set()
for domain in sorted(domains, key=lambda d : len(d)):
for d in zone_domains:
if domain.endswith("." + d):
# We found a parent domain already in the list.
break
else:
# 'break' did not occur: there is no parent domain.
zone_domains.add(domain)
# Make a nice and safe filename for each domain.
zonefiles = []
for domain in zone_domains:
zonefiles.append([domain, safe_domain_name(domain) + ".txt"])
# Sort the list so that the order is nice and so that nsd.conf has a
# stable order so we don't rewrite the file & restart the service
# meaninglessly.
zone_order = sort_domains([ zone[0] for zone in zonefiles ], env)
zonefiles.sort(key = lambda zone : zone_order.index(zone[0]) )
return zonefiles
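# Illustrative example (hypothetical domains): given the domain set
# {"example.com", "mail.example.com", "other.org"}, only "example.com" and
# "other.org" become zones; "mail.example.com" is folded into its parent zone.
# Each returned entry pairs the zone domain with a zone filename derived via
# safe_domain_name(), in the order produced by sort_domains().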
def do_dns_update(env, force=False):
# Write zone files.
os.makedirs('/etc/nsd/zones', exist_ok=True)
zonefiles = []
updated_domains = []
for (domain, zonefile, records) in build_zones(env):
# The final set of files will be signed.
zonefiles.append((domain, zonefile + ".signed"))
# See if the zone has changed, and if so update the serial number
# and write the zone file.
if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records, env, force):
# Zone was not updated. There were no changes.
continue
# Mark that we just updated this domain.
updated_domains.append(domain)
# Sign the zone.
#
# Every time we sign the zone we get a new result, which means
# we can't sign a zone without bumping the zone's serial number.
# Thus we only sign a zone if write_nsd_zone returned True
# indicating the zone changed, and thus it got a new serial number.
# write_nsd_zone is smart enough to check if a zone's signature
# is nearing expiration and if so it'll bump the serial number
# and return True so we get a chance to re-sign it.
sign_zone(domain, zonefile, env)
# Write the main nsd.conf file.
if write_nsd_conf(zonefiles, list(get_custom_dns_config(env)), env):
# Make sure updated_domains contains *something* if we wrote an updated
# nsd.conf so that we know to restart nsd.
if len(updated_domains) == 0:
updated_domains.append("DNS configuration")
# Tell nsd to reload changed zone files.
if len(updated_domains) > 0:
# 'reconfig' is needed if there are added or removed zones, but
# it may not reload existing zones, so we call 'reload' too. If
# nsd isn't running, nsd-control fails, so in that case revert
# to restarting nsd to make sure it is running. Restarting nsd
# should also refresh everything.
try:
shell('check_call', ["/usr/sbin/nsd-control", "reconfig"])
shell('check_call', ["/usr/sbin/nsd-control", "reload"])
except:
shell('check_call', ["/usr/sbin/service", "nsd", "restart"])
# Write the OpenDKIM configuration tables for all of the mail domains.
from mailconfig import get_mail_domains
if write_opendkim_tables(get_mail_domains(env), env):
# Settings changed. Kick opendkim.
shell('check_call', ["/usr/sbin/service", "opendkim", "restart"])
if len(updated_domains) == 0:
# If nothing else changed, record the OpenDKIM update so the caller still reports something.
updated_domains.append("OpenDKIM configuration")
# Clear bind9's DNS cache so our own DNS resolver is up to date.
# (ignore errors with trap=True)
shell('check_call', ["/usr/sbin/rndc", "flush"], trap=True)
if len(updated_domains) == 0:
# if nothing was updated (except maybe OpenDKIM's files), don't show any output
return ""
else:
return "updated DNS: " + ",".join(updated_domains) + "\n"
########################################################################
def build_zones(env):
# What domains (and their zone filenames) should we build?
domains = get_dns_domains(env)
zonefiles = get_dns_zones(env)
# Create a dictionary of domains to a set of attributes for each
# domain, such as whether there are mail users at the domain.
from mailconfig import get_mail_domains
from web_update import get_web_domains
mail_domains = set(get_mail_domains(env))
mail_user_domains = set(get_mail_domains(env, users_only=True)) # i.e. will log in for mail, Nextcloud
web_domains = set(get_web_domains(env))
auto_domains = web_domains - set(get_web_domains(env, include_auto=False))
domains |= auto_domains # www redirects not included in the initial list, see above
# Add ns1/ns2+PRIMARY_HOSTNAME which must also have A/AAAA records
# when the box is acting as authoritative DNS server for its domains.
for ns in ("ns1", "ns2"):
d = ns + "." + env["PRIMARY_HOSTNAME"]
domains.add(d)
auto_domains.add(d)
domains = {
domain: {
"user": domain in mail_user_domains,
"mail": domain in mail_domains,
"web": domain in web_domains,
"auto": domain in auto_domains,
}
for domain in domains
}
# For MTA-STS, we'll need to check if the PRIMARY_HOSTNAME certificate is
# signed and valid. Check that now rather than repeatedly for each domain.
domains[env["PRIMARY_HOSTNAME"]]["certificate-is-valid"] = is_domain_cert_signed_and_valid(env["PRIMARY_HOSTNAME"], env)
# Load custom records to add to zones.
additional_records = list(get_custom_dns_config(env))
# Build DNS records for each zone.
for domain, zonefile in zonefiles:
# Build the records to put in the zone.
records = build_zone(domain, domains, additional_records, env)
yield (domain, zonefile, records)
def build_zone(domain, domain_properties, additional_records, env, is_zone=True):
records = []
# For top-level zones, define the authoritative name servers.
#
# Normally we are our own nameservers. Some TLDs require two distinct IP addresses,
# so we allow the user to override the second nameserver definition so that
# secondary DNS can be set up elsewhere.
#
# 'False' in the tuple indicates these records would not be used if the zone
# is managed outside of the box.
if is_zone:
# Obligatory NS record to ns1.PRIMARY_HOSTNAME.
records.append((None, "NS", "ns1.%s." % env["PRIMARY_HOSTNAME"], False))
# NS record to ns2.PRIMARY_HOSTNAME or whatever the user overrides.
# User may provide one or more additional nameservers
secondary_ns_list = get_secondary_dns(additional_records, mode="NS") \
or ["ns2." + env["PRIMARY_HOSTNAME"]]
for secondary_ns in secondary_ns_list:
records.append((None, "NS", secondary_ns+'.', False))
# In PRIMARY_HOSTNAME...
if domain == env["PRIMARY_HOSTNAME"]:
# Set the A/AAAA records. Do this early for the PRIMARY_HOSTNAME so that the user cannot override them
# and we can provide different explanatory text.
records.append((None, "A", env["PUBLIC_IP"], "Required. Sets the IP address of the box."))
if env.get("PUBLIC_IPV6"): records.append((None, "AAAA", env["PUBLIC_IPV6"], "Required. Sets the IPv6 address of the box."))
# Add a DANE TLSA record for SMTP.
records.append(("_25._tcp", "TLSA", build_tlsa_record(env), "Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used."))
# Add a DANE TLSA record for HTTPS, which some browser extensions might make use of.
records.append(("_443._tcp", "TLSA", build_tlsa_record(env), "Optional. When DNSSEC is enabled, provides out-of-band HTTPS certificate validation for a few web clients that support it."))
# Add a SSHFP records to help SSH key validation. One per available SSH key on this system.
for value in build_sshfp_records():
records.append((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh."))
# Add DNS records for any subdomains of this domain. We should not have a zone for
# both a domain and one of its subdomains.
if is_zone: # don't recurse when we're just loading data for a subdomain
subdomains = [d for d in domain_properties if d.endswith("." + domain)]
for subdomain in subdomains:
subdomain_qname = subdomain[0:-len("." + domain)]
subzone = build_zone(subdomain, domain_properties, additional_records, env, is_zone=False)
for child_qname, child_rtype, child_value, child_explanation in subzone:
if child_qname == None:
child_qname = subdomain_qname
else:
child_qname += "." + subdomain_qname
records.append((child_qname, child_rtype, child_value, child_explanation))
has_rec_base = list(records) # clone current state
def has_rec(qname, rtype, prefix=None):
for rec in has_rec_base:
if rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)):
return True
return False
# The user may set other records that don't conflict with our settings.
# Don't put any TXT records above this line, or it'll prevent any custom TXT records.
for qname, rtype, value in filter_custom_records(domain, additional_records):
# Don't allow custom records for record types that override anything above.
# But allow multiple custom records for the same rtype --- see how has_rec_base is used.
if has_rec(qname, rtype): continue
# The "local" keyword on A/AAAA records are short-hand for our own IP.
# This also flags for web configuration that the user wants a website here.
if rtype == "A" and value == "local":
value = env["PUBLIC_IP"]
if rtype == "AAAA" and value == "local":
if "PUBLIC_IPV6" in env:
value = env["PUBLIC_IPV6"]
else:
continue
records.append((qname, rtype, value, "(Set by user.)"))
# Add A/AAAA defaults if not overridden by the user's custom settings (and not otherwise configured).
# Any CNAME or A record on the qname overrides A and AAAA. But when we set the default A record,
# we should not cause the default AAAA record to be skipped because it thinks a custom A record
# was set. So set has_rec_base to a clone of the current set of DNS settings, and don't update
# during this process.
has_rec_base = list(records)
a_expl = "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery." % domain
if domain_properties[domain]["auto"]:
if domain.startswith("ns1.") or domain.startswith("ns2."): a_expl = False # omit from 'External DNS' page since this only applies if box is its own DNS server
if domain.startswith("www."): a_expl = "Optional. Sets the IP address that %s resolves to so that the box can provide a redirect to the parent domain." % domain
if domain.startswith("mta-sts."): a_expl = "Optional. MTA-STS Policy Host serving /.well-known/mta-sts.txt."
if domain.startswith("autoconfig."): a_expl = "Provides email configuration autodiscovery support for Thunderbird Autoconfig."
if domain.startswith("autodiscover."): a_expl = "Provides email configuration autodiscovery support for Z-Push ActiveSync Autodiscover."
defaults = [
(None, "A", env["PUBLIC_IP"], a_expl),
(None, "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)" % domain),
]
for qname, rtype, value, explanation in defaults:
if value is None or value.strip() == "": continue # skip IPV6 if not set
if not is_zone and qname == "www": continue # don't create any default 'www' subdomains on what are themselves subdomains
# Set the default record, but only if:
# (1) there is no user-set record of the same type already,
# (2) there is no CNAME record already, since you can't set both and who knows what takes precedence, and
# (3) there is no A record already (if this is an A record this is a dup of (1), and if this is an AAAA record then don't set a default AAAA record if the user sets a custom A record, since the default wouldn't make sense and it should not resolve if the user doesn't provide a new AAAA record)
if not has_rec(qname, rtype) and not has_rec(qname, "CNAME") and not has_rec(qname, "A"):
records.append((qname, rtype, value, explanation))
# Don't pin the list of records that has_rec checks against anymore.
has_rec_base = records
if domain_properties[domain]["mail"]:
# The MX record says where email for the domain should be delivered: Here!
if not has_rec(None, "MX", prefix="10 "):
records.append((None, "MX", "10 %s." % env["PRIMARY_HOSTNAME"], "Required. Specifies the hostname (and priority) of the machine that handles @%s mail." % domain))
# SPF record: Permit the box ('mx', see above) to send mail on behalf of
# the domain, and no one else.
# Skip if the user has set a custom SPF record.
if not has_rec(None, "TXT", prefix="v=spf1 "):
records.append((None, "TXT", 'v=spf1 mx -all', "Recommended. Specifies that only the box is permitted to send @%s mail." % domain))
# Append the DKIM TXT record to the zone as generated by OpenDKIM.
# Skip if the user has set a DKIM record already.
opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt')
with open(opendkim_record_file) as orf:
m = re.match(r'(\S+)\s+IN\s+TXT\s+\( ((?:"[^"]+"\s+)+)\)', orf.read(), re.S)
val = "".join(re.findall(r'"([^"]+)"', m.group(2)))
if not has_rec(m.group(1), "TXT", prefix="v=DKIM1; "):
records.append((m.group(1), "TXT", val, "Recommended. Provides a way for recipients to verify that this machine sent @%s mail." % domain))
# Append a DMARC record.
# Skip if the user has set a DMARC record already.
if not has_rec("_dmarc", "TXT", prefix="v=DMARC1; "):
records.append(("_dmarc", "TXT", 'v=DMARC1; p=quarantine;', "Recommended. Specifies that mail that does not originate from the box but claims to be from @%s or which does not have a valid DKIM signature is suspect and should be quarantined by the recipient's mail system." % domain))
if domain_properties[domain]["user"]:
# Add CardDAV/CalDAV SRV records on the non-primary hostname that points to the primary hostname
# for autoconfiguration of mail clients (so only domains hosting user accounts need it).
# The SRV record format is priority (0, whatever), weight (0, whatever), port, service provider hostname (w/ trailing dot).
if domain != env["PRIMARY_HOSTNAME"]:
for dav in ("card", "cal"):
qname = "_" + dav + "davs._tcp"
if not has_rec(qname, "SRV"):
records.append((qname, "SRV", "0 0 443 " + env["PRIMARY_HOSTNAME"] + ".", "Recommended. Specifies the hostname of the server that handles CardDAV/CalDAV services for email addresses on this domain."))
# If this is a domain name that there are email addresses configured for, i.e. "something@"
# this domain name, then the domain name is a MTA-STS (https://tools.ietf.org/html/rfc8461)
# Policy Domain.
#
# A "_mta-sts" TXT record signals the presence of a MTA-STS policy. The id field helps clients
# cache the policy. It should be stable so we don't update DNS unnecessarily but change when
# the policy changes. It must be at most 32 letters and numbers, so we compute a hash of the
# policy file.
#
# The policy itself is served at the "mta-sts" (no underscore) subdomain over HTTPS. Therefore
# the TLS certificate used by Postfix for STARTTLS must be a valid certificate for the MX
# domain name (PRIMARY_HOSTNAME) *and* the TLS certificate used by nginx for HTTPS on the mta-sts
# subdomain must be valid certificate for that domain. Do not set an MTA-STS policy if either
# certificate in use is not valid (e.g. because it is self-signed and a valid certificate has not
# yet been provisioned). Since we cannot provision a certificate without A/AAAA records, we
# always set them (by including them in the www domains) --- only the TXT records depend on there
# being valid certificates.
mta_sts_records = [ ]
if domain_properties[domain]["mail"] \
and domain_properties[env["PRIMARY_HOSTNAME"]]["certificate-is-valid"] \
and is_domain_cert_signed_and_valid("mta-sts." + domain, env):
# Compute an up-to-32-character hash of the policy file. We'll take a SHA-1 hash of the policy
# file (20 bytes) and encode it as base-64 (28 bytes, using alphanumeric alternate characters
# instead of '+' and '/' which are not allowed in an MTA-STS policy id) but then just take its
# first 20 characters, which is more than sufficient to change whenever the policy file changes
# (and ensures any '=' padding at the end of the base64 encoding is dropped).
with open("/var/lib/mailinabox/mta-sts.txt", "rb") as f:
mta_sts_policy_id = base64.b64encode(hashlib.sha1(f.read()).digest(), altchars=b"AA").decode("ascii")[0:20]
mta_sts_records.extend([
("_mta-sts", "TXT", "v=STSv1; id=" + mta_sts_policy_id, "Optional. Part of the MTA-STS policy for incoming mail. If set, a MTA-STS policy must also be published.")
])
# Enable SMTP TLS reporting (https://tools.ietf.org/html/rfc8460) if the user has set a config option.
# Skip if the rules below if the user has set a custom _smtp._tls record.
if env.get("MTA_STS_TLSRPT_RUA") and not has_rec("_smtp._tls", "TXT", prefix="v=TLSRPTv1;"):
mta_sts_records.append(("_smtp._tls", "TXT", "v=TLSRPTv1; rua=" + env["MTA_STS_TLSRPT_RUA"], "Optional. Enables MTA-STS reporting."))
for qname, rtype, value, explanation in mta_sts_records:
if not has_rec(qname, rtype):
records.append((qname, rtype, value, explanation))
# Add no-mail-here records for any qname that has an A or AAAA record
# but no MX record. This would include domain itself if domain is a
# non-mail domain and also may include qnames from custom DNS records.
# Do this once at the end of generating a zone.
if is_zone:
qnames_with_a = set(qname for (qname, rtype, value, explanation) in records if rtype in ("A", "AAAA"))
qnames_with_mx = set(qname for (qname, rtype, value, explanation) in records if rtype == "MX")
for qname in qnames_with_a - qnames_with_mx:
# Mark this domain as not sending mail with hard-fail SPF and DMARC records.
d = (qname+"." if qname else "") + domain
if not has_rec(qname, "TXT", prefix="v=spf1 "):
records.append((qname, "TXT", 'v=spf1 -all', "Recommended. Prevents use of this domain name for outbound mail by specifying that no servers are valid sources for mail from @%s. If you do send email from this domain name you should either override this record such that the SPF rule does allow the originating server, or, take the recommended approach and have the box handle mail for this domain (simply add any receiving alias at this domain name to make this machine treat the domain name as one of its mail domains)." % d))
if not has_rec("_dmarc" + ("."+qname if qname else ""), "TXT", prefix="v=DMARC1; "):
records.append(("_dmarc" + ("."+qname if qname else ""), "TXT", 'v=DMARC1; p=reject;', "Recommended. Prevents use of this domain name for outbound mail by specifying that the SPF rule should be honoured for mail from @%s." % d))
# And with a null MX record (https://explained-from-first-principles.com/email/#null-mx-record)
if not has_rec(qname, "MX"):
records.append((qname, "MX", '0 .', "Recommended. Prevents use of this domain name for incoming mail."))
# Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter.
records.sort(key = lambda rec : list(reversed(rec[0].split(".")) if rec[0] is not None else ""))
return records
def is_domain_cert_signed_and_valid(domain, env):
cert = get_ssl_certificates(env).get(domain)
if not cert: return False # no certificate provisioned
cert_status = check_certificate(domain, cert['certificate'], cert['private-key'])
return cert_status[0] == 'OK'
########################################################################
def build_tlsa_record(env):
# A DANE TLSA record in DNS specifies that connections on a port
# must use TLS and the certificate must match a particular criteria.
#
# Thanks to http://blog.huque.com/2012/10/dnssec-and-certificates.html
# and https://community.letsencrypt.org/t/please-avoid-3-0-1-and-3-0-2-dane-tlsa-records-with-le-certificates/7022
# for explaining all of this! Also see https://tools.ietf.org/html/rfc6698#section-2.1
# and https://github.com/mail-in-a-box/mailinabox/issues/268#issuecomment-167160243.
#
# There are several criteria. We used to use "3 0 1" criteria, which
# meant to pin a leaf (3) certificate (0) with SHA256 hash (1). But
# certificates change, and especially as we move to short-lived certs
# they change often. The TLSA record handily supports the criteria of
# a leaf certificate (3)'s subject public key (1) with SHA256 hash (1).
# The subject public key is the public key portion of the private key
# that generated the CSR that generated the certificate. Since we
# generate a private key once the first time Mail-in-a-Box is set up
# and reuse it for all subsequent certificates, the TLSA record will
# remain valid indefinitely.
from ssl_certificates import load_cert_chain, load_pem
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
fn = os.path.join(env["STORAGE_ROOT"], "ssl", "ssl_certificate.pem")
cert = load_pem(load_cert_chain(fn)[0])
subject_public_key = cert.public_key().public_bytes(Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
# We could have also loaded ssl_private_key.pem and called priv_key.public_key().public_bytes(...)
pk_hash = hashlib.sha256(subject_public_key).hexdigest()
# Specify the TLSA parameters:
# 3: Match the (leaf) certificate. (No CA, no trust path needed.)
# 1: Match its subject public key.
# 1: Use SHA256.
return "3 1 1 " + pk_hash
def build_sshfp_records():
# The SSHFP record is a way for us to embed this server's SSH public
# key fingerprint into the DNS so that remote hosts have an out-of-band
# method to confirm the fingerprint. See RFC 4255 and RFC 6594. This
# depends on DNSSEC.
#
# On the client side, set SSH's VerifyHostKeyDNS option to 'ask' to
# include this info in the key verification prompt or 'yes' to trust
# the SSHFP record.
#
# See https://github.com/xelerance/sshfp for inspiration.
algorithm_number = {
"ssh-rsa": 1,
"ssh-dss": 2,
"ecdsa-sha2-nistp256": 3,
"ssh-ed25519": 4,
}
# Get our local fingerprints by running ssh-keyscan. The output looks
# like the known_hosts file: hostname, keytype, fingerprint. The order
# of the output is arbitrary, so sort it to prevent spurious updates
# to the zone file (that trigger bumping the serial number). However,
# if SSH has been configured to listen on a nonstandard port, we must
# specify that port to ssh-keyscan.
port = 22
with open('/etc/ssh/sshd_config', 'r') as f:
for line in f:
s = line.rstrip().split()
if len(s) == 2 and s[0] == 'Port':
try:
port = int(s[1])
except ValueError:
pass
break
keys = shell("check_output", ["ssh-keyscan", "-t", "rsa,dsa,ecdsa,ed25519", "-p", str(port), "localhost"])
keys = sorted(keys.split("\n"))
for key in keys:
if key.strip() == "" or key[0] == "#": continue
try:
host, keytype, pubkey = key.split(" ")
yield "%d %d ( %s )" % (
algorithm_number[keytype],
2, # specifies we are using SHA-256 on next line
hashlib.sha256(base64.b64decode(pubkey)).hexdigest().upper(),
)
except:
# Lots of things can go wrong. Don't let it disturb the DNS
# zone.
pass
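# Illustrative sketch (hypothetical fingerprint): for an ssh-ed25519 host key the
# generated zone data looks like
#   IN SSHFP 4 2 ( <64 hex characters of the SHA-256 fingerprint> )
# where 4 = ssh-ed25519 (per the mapping above) and 2 = SHA-256.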
########################################################################
def write_nsd_zone(domain, zonefile, records, env, force):
# On the $ORIGIN line, there's typically a ';' comment at the end explaining
# what the $ORIGIN line does. Any further data after the domain confuses
# ldns-signzone, however. It used to say '; default zone domain'.
#
# The SOA contact address for all of the domains on this system is hostmaster
# @ the PRIMARY_HOSTNAME. Hopefully that's legit.
#
# For the refresh through TTL fields, a good reference is:
# https://www.ripe.net/publications/docs/ripe-203
#
# A hash of the available DNSSEC keys are added in a comment so that when
# the keys change we force a re-generation of the zone which triggers
# re-signing it.
zone = """
$ORIGIN {domain}.
$TTL 86400 ; default time to live
@ IN SOA ns1.{primary_domain}. hostmaster.{primary_domain}. (
__SERIAL__ ; serial number
7200 ; Refresh (secondary nameserver update interval)
3600 ; Retry (when refresh fails, how often to try again, should be lower than the refresh)
1209600 ; Expire (when refresh fails, how long secondary nameserver will keep records around anyway)
86400 ; Negative TTL (how long negative responses are cached)
)
"""
# Replace replacement strings.
zone = zone.format(domain=domain, primary_domain=env["PRIMARY_HOSTNAME"])
# Add records.
for subdomain, querytype, value, explanation in records:
if subdomain:
zone += subdomain
zone += "\tIN\t" + querytype + "\t"
if querytype == "TXT":
# Divide into 255-byte max substrings.
v2 = ""
while len(value) > 0:
s = value[0:255]
value = value[255:]
s = s.replace('\\', '\\\\') # escape backslashes
s = s.replace('"', '\\"') # escape quotes
s = '"' + s + '"' # wrap in quotes
v2 += s + " "
value = v2
zone += value + "\n"
# Append a stable hash of DNSSEC signing keys in a comment.
zone += "\n; DNSSEC signing keys hash: {}\n".format(hash_dnssec_keys(domain, env))
# DNSSEC requires re-signing a zone periodically. That requires
# bumping the serial number even if no other records have changed.
# We don't see the DNSSEC records yet, so we have to figure out
# if a re-signing is necessary so we can prematurely bump the
# serial number.
force_bump = False
if not os.path.exists(zonefile + ".signed"):
# No signed file yet. Shouldn't normally happen unless a box
# is going from not using DNSSEC to using DNSSEC.
force_bump = True
else:
# We've signed the domain. Check if we are close to the expiration
# time of the signature. If so, we'll force a bump of the serial
# number so we can re-sign it.
with open(zonefile + ".signed") as f:
signed_zone = f.read()
expiration_times = re.findall(r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})", signed_zone)
if len(expiration_times) == 0:
# weird
force_bump = True
else:
# All of the times should be the same, but if not choose the soonest.
expiration_time = min(expiration_times)
expiration_time = datetime.datetime.strptime(expiration_time, "%Y%m%d%H%M%S")
if expiration_time - datetime.datetime.now() < datetime.timedelta(days=3):
# We're within three days of the expiration, so bump serial & resign.
force_bump = True
# Set the serial number.
serial = datetime.datetime.now().strftime("%Y%m%d00")
if os.path.exists(zonefile):
# If the zone already exists, is different, and has a later serial number,
# increment the number.
with open(zonefile) as f:
existing_zone = f.read()
m = re.search(r"(\d+)\s*;\s*serial number", existing_zone)
if m:
# Clear out the serial number in the existing zone file for the
# purposes of seeing if anything *else* in the zone has changed.
existing_serial = m.group(1)
existing_zone = existing_zone.replace(m.group(0), "__SERIAL__ ; serial number")
# If the existing zone is the same as the new zone (modulo the serial number),
# there is no need to update the file. Unless we're forcing a bump.
if zone == existing_zone and not force_bump and not force:
return False
# If the existing serial is not less than a serial number
# based on the current date plus 00, increment it. Otherwise,
# the serial number is less than our desired new serial number
# so we'll use the desired new number.
if existing_serial >= serial:
serial = str(int(existing_serial) + 1)
zone = zone.replace("__SERIAL__", serial)
# Write the zone file.
with open(zonefile, "w") as f:
f.write(zone)
return True # file is updated
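# Illustrative serial-number behaviour (hypothetical dates): the first write on
# 2024-01-02 produces serial 2024010200; a second change later the same day finds
# existing_serial ("2024010200") >= the desired serial and bumps it to 2024010201;
# an unchanged zone (with no forced or signature-driven bump) returns False
# without rewriting the file.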
def get_dns_zonefile(zone, env):
for domain, fn in get_dns_zones(env):
if zone == domain:
break
else:
raise ValueError("%s is not a domain name that corresponds to a zone." % zone)
nsd_zonefile = "/etc/nsd/zones/" + fn
with open(nsd_zonefile, "r") as f:
return f.read()
########################################################################
def write_nsd_conf(zonefiles, additional_records, env):
# Write the list of zones to a configuration file.
nsd_conf_file = "/etc/nsd/nsd.conf.d/zones.conf"
nsdconf = ""
# Append the zones.
for domain, zonefile in zonefiles:
nsdconf += """
zone:
name: %s
zonefile: %s
""" % (domain, zonefile)
# If custom secondary nameservers have been set, allow zone transfers
# and, if not a subnet, notifies to them.
for ipaddr in get_secondary_dns(additional_records, mode="xfr"):
if "/" not in ipaddr:
nsdconf += "\n\tnotify: %s NOKEY" % (ipaddr)
nsdconf += "\n\tprovide-xfr: %s NOKEY\n" % (ipaddr)
# Check if the file is changing. If it isn't changing,
# return False to flag that no change was made.
if os.path.exists(nsd_conf_file):
with open(nsd_conf_file) as f:
if f.read() == nsdconf:
return False
# Write out new contents and return True to signal that
# configuration changed.
with open(nsd_conf_file, "w") as f:
f.write(nsdconf)
return True
########################################################################
def find_dnssec_signing_keys(domain, env):
# For key that we generated (one per algorithm)...
d = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec')
keyconfs = [f for f in os.listdir(d) if f.endswith(".conf")]
for keyconf in keyconfs:
# Load the file holding the KSK and ZSK key filenames.
keyconf_fn = os.path.join(d, keyconf)
keyinfo = load_env_vars_from_file(keyconf_fn)
# Skip this key if the conf file has a setting named DOMAINS,
# holding a comma-separated list of domain names, and if this
# domain is not in the list. This allows easily disabling a
# key by setting "DOMAINS=" or "DOMAINS=none", other than
# deleting the key's .conf file, which might result in the key
# being regenerated next upgrade. Keys should be disabled if
# they are not needed to reduce the DNSSEC query response size.
if "DOMAINS" in keyinfo and domain not in [dd.strip() for dd in keyinfo["DOMAINS"].split(",")]:
continue
for keytype in ("KSK", "ZSK"):
yield keytype, keyinfo[keytype]
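# A sketch of what one of those dns/dnssec/*.conf files might contain; the
# key filenames are hypothetical, but KSK/ZSK/DOMAINS are the variables read
# above via load_env_vars_from_file:
#
#   KSK=K_domain_.+013+12345
#   ZSK=K_domain_.+013+54321
#   DOMAINS=example.com,example.net
#
# With that optional DOMAINS line present, find_dnssec_signing_keys would
# yield this key pair for example.com but skip it for any other domain.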
def hash_dnssec_keys(domain, env):
# Create a stable (by sorting the items) hash of all of the private keys
# that will be used to sign this domain.
keydata = []
for keytype, keyfn in sorted(find_dnssec_signing_keys(domain, env)):
oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ".private")
keydata.append(keytype)
keydata.append(keyfn)
with open(oldkeyfn, "r") as fr:
keydata.append( fr.read() )
keydata = "".join(keydata).encode("utf8")
return hashlib.sha1(keydata).hexdigest()
def sign_zone(domain, zonefile, env):
# Sign the zone with all of the keys that were generated during
# setup so that the user can choose which to use in their DS record at
# their registrar, and also to support migration to newer algorithms.
# In order to use the key files generated at setup which are for
# the domain _domain_, we have to re-write the files and place
# the actual domain name in it, so that ldns-signzone works.
#
# Patch each key, storing the patched version in /tmp for now.
# Each key has a .key and .private file. Collect a list of filenames
# for all of the keys (and separately just the key-signing keys).
all_keys = []
ksk_keys = []
for keytype, keyfn in find_dnssec_signing_keys(domain, env):
newkeyfn = '/tmp/' + keyfn.replace("_domain_", domain)
for ext in (".private", ".key"):
# Copy the .key and .private files to /tmp to patch them up.
#
# Use os.umask and open().write() to securely create a copy that only
# we (root) can read.
oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ext)
with open(oldkeyfn, "r") as fr:
keydata = fr.read()
keydata = keydata.replace("_domain_", domain)
prev_umask = os.umask(0o77) # ensure written file is not world-readable
try:
with open(newkeyfn + ext, "w") as fw:
fw.write(keydata)
finally:
os.umask(prev_umask) # other files we write should be world-readable
# Put the patched key filename base (without extension) into the list of keys we'll sign with.
all_keys.append(newkeyfn)
if keytype == "KSK": ksk_keys.append(newkeyfn)
# Do the signing.
expiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y%m%d")
shell('check_call', ["/usr/bin/ldns-signzone",
# expire the zone after 30 days
"-e", expiry_date,
# use NSEC3
"-n",
# zonefile to sign
"/etc/nsd/zones/" + zonefile,
]
# keys to sign with (order doesn't matter -- it'll figure it out)
+ all_keys
)
# Create a DS record based on the patched-up key files. The DS record is specific to the
# zone being signed, so we can't use the .ds files generated when we created the keys.
# The DS record points to the KSK only. Write this next to the zone file so we can
# get it later to give to the user with instructions on what to do with it.
#
# Generate a DS record for each key. There are also several possible hash algorithms that may
# be used, so we'll pre-generate all for each key. One DS record per line. Only one
# needs to actually be deployed at the registrar. We'll select the preferred one
# in the status checks.
with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f:
for key in ksk_keys:
for digest_type in ('1', '2', '4'):
rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds",
"-n", # output to stdout
"-" + digest_type, # 1=SHA1, 2=SHA256, 4=SHA384
key + ".key"
])
f.write(rr_ds)
# Remove the temporary patched key files.
for fn in all_keys:
os.unlink(fn + ".private")
os.unlink(fn + ".key")
########################################################################
def write_opendkim_tables(domains, env):
# Append a record to OpenDKIM's KeyTable and SigningTable for each domain
# that we send mail from (zones and all subdomains).
opendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')
if not os.path.exists(opendkim_key_file):
# Looks like OpenDKIM is not installed.
return False
config = {
# The SigningTable maps email addresses to a key in the KeyTable that
# specifies signing information for matching email addresses. Here we
# map each domain to a same-named key.
#
# Elsewhere we set the DMARC policy for each domain such that mail claiming
# to be From: the domain must be signed with a DKIM key on the same domain.
# So we must have a separate KeyTable entry for each domain.
"SigningTable":
"".join(
"*@{domain} {domain}\n".format(domain=domain)
for domain in domains
),
# The KeyTable specifies the signing domain, the DKIM selector, and the
# path to the private key to use for signing some mail. Per DMARC, the
# signing domain must match the sender's From: domain.
"KeyTable":
"".join(
"{domain} {domain}:mail:{key_file}\n".format(domain=domain, key_file=opendkim_key_file)
for domain in domains
),
}
did_update = False
for filename, content in config.items():
# Don't write the file if it doesn't need an update.
if os.path.exists("/etc/opendkim/" + filename):
with open("/etc/opendkim/" + filename) as f:
if f.read() == content:
continue
# The contents needs to change.
with open("/etc/opendkim/" + filename, "w") as f:
f.write(content)
did_update = True
# Return whether the files changed. If they didn't change, there's
# no need to kick the opendkim process.
return did_update
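# For example, with domains ["example.com", "lists.example.com"] and the usual
# key location (assuming STORAGE_ROOT is /home/user-data), the two tables
# written above would contain:
#
#   SigningTable:
#     *@example.com example.com
#     *@lists.example.com lists.example.com
#   KeyTable:
#     example.com example.com:mail:/home/user-data/mail/dkim/mail.private
#     lists.example.com lists.example.com:mail:/home/user-data/mail/dkim/mail.private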
########################################################################
def get_custom_dns_config(env, only_real_records=False):
try:
custom_dns = rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
if not isinstance(custom_dns, dict): raise ValueError() # caught below
except:
return [ ]
for qname, value in custom_dns.items():
if qname == "_secondary_nameserver" and only_real_records: continue # skip fake record
# Short form. Mapping a domain name to a string is short-hand
# for creating A records.
if isinstance(value, str):
values = [("A", value)]
# A mapping creates multiple records.
elif isinstance(value, dict):
values = value.items()
# No other type of data is allowed.
else:
raise ValueError()
for rtype, value2 in values:
if isinstance(value2, str):
yield (qname, rtype, value2)
elif isinstance(value2, list):
for value3 in value2:
yield (qname, rtype, value3)
# No other type of data is allowed.
else:
raise ValueError()
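# A sketch of the two shapes accepted in dns/custom.yaml (values hypothetical):
#
#   www.example.com: 203.0.113.5        # short form -> an A record
#   example.com:
#     MX: "10 mail.example.com."        # single value for a record type
#     TXT: ["v=spf1 -all", "note=x"]    # list -> one record per entry
#
# which this generator flattens into (qname, rtype, value) triples such as
# ("www.example.com", "A", "203.0.113.5") and ("example.com", "TXT", "v=spf1 -all").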
def filter_custom_records(domain, custom_dns_iter):
for qname, rtype, value in custom_dns_iter:
# We don't count the secondary nameserver config (if present) as a record - that would just be
# confusing to users. Instead it is accessed/manipulated directly via (get/set)_custom_dns_config.
if qname == "_secondary_nameserver": continue
# Is this record for the domain or one of its subdomains?
# If `domain` is None, return records for all domains.
if domain is not None and qname != domain and not qname.endswith("." + domain): continue
# Turn the fully qualified domain name in the YAML file into
# our short form (None => domain, or a relative QNAME) if
# domain is not None.
if domain is not None:
if qname == domain:
qname = None
else:
qname = qname[0:len(qname)-len("." + domain)]
yield (qname, rtype, value)
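# Example of the shortening above: with domain="example.com", the triple
# ("www.example.com", "A", "203.0.113.5") comes out as ("www", "A", "203.0.113.5"),
# ("example.com", "MX", "10 mail.example.com.") comes out with qname=None, and
# records belonging to unrelated domains are dropped.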
def write_custom_dns_config(config, env):
# We get a list of (qname, rtype, value) triples. Convert this into a
# nice dictionary format for storage on disk.
from collections import OrderedDict
config = list(config)
dns = OrderedDict()
seen_qnames = set()
# Process the qnames in the order we see them.
for qname in [rec[0] for rec in config]:
if qname in seen_qnames: continue
seen_qnames.add(qname)
records = [(rec[1], rec[2]) for rec in config if rec[0] == qname]
if len(records) == 1 and records[0][0] == "A":
dns[qname] = records[0][1]
else:
dns[qname] = OrderedDict()
seen_rtypes = set()
# Process the rtypes in the order we see them.
for rtype in [rec[0] for rec in records]:
if rtype in seen_rtypes: continue
seen_rtypes.add(rtype)
values = [rec[1] for rec in records if rec[0] == rtype]
if len(values) == 1:
values = values[0]
dns[qname][rtype] = values
# Write.
config_yaml = rtyaml.dump(dns)
with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w") as f:
f.write(config_yaml)
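# For example, the triples
#   ("example.com", "A", "203.0.113.5")
#   ("example.com", "TXT", "v=spf1 -all")
#   ("www.example.com", "A", "203.0.113.5")
# would be stored roughly as:
#   example.com:
#     A: 203.0.113.5
#     TXT: v=spf1 -all
#   www.example.com: 203.0.113.5
# A qname with a single A record collapses to the short form, and multiple
# values for the same record type would be written as a YAML list.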
def set_custom_dns_record(qname, rtype, value, action, env):
# validate qname
for zone, fn in get_dns_zones(env):
# It must match a zone apex or be a subdomain of a zone
# that we are otherwise hosting.
if qname == zone or qname.endswith("."+zone):
break
else:
# No match.
if qname != "_secondary_nameserver":
raise ValueError("%s is not a domain name or a subdomain of a domain name managed by this box." % qname)
# validate rtype
rtype = rtype.upper()
if value is not None and qname != "_secondary_nameserver":
if not re.search(DOMAIN_RE, qname):
raise ValueError("Invalid name.")
if rtype in ("A", "AAAA"):
if value != "local": # "local" is a special flag for us
v = ipaddress.ip_address(value) # raises a ValueError if there's a problem
if rtype == "A" and not isinstance(v, ipaddress.IPv4Address): raise ValueError("That's an IPv6 address.")
if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address): raise ValueError("That's an IPv4 address.")
elif rtype in ("CNAME", "NS"):
if rtype == "NS" and qname == zone:
raise ValueError("NS records can only be set for subdomains.")
# ensure value has a trailing dot
if not value.endswith("."):
value = value + "."
if not re.search(DOMAIN_RE, value):
raise ValueError("Invalid value.")
elif rtype in ("CNAME", "TXT", "SRV", "MX", "SSHFP", "CAA"):
# anything goes
pass
else:
raise ValueError("Unknown record type '%s'." % rtype)
# load existing config
config = list(get_custom_dns_config(env))
# update
newconfig = []
made_change = False
needs_add = True
for _qname, _rtype, _value in config:
if action == "add":
if (_qname, _rtype, _value) == (qname, rtype, value):
# Record already exists. Bail.
return False
elif action == "set":
if (_qname, _rtype) == (qname, rtype):
if _value == value:
# Flag that the record already exists, don't
# need to add it.
needs_add = False
else:
# Drop any other values for this (qname, rtype).
made_change = True
continue
elif action == "remove":
if (_qname, _rtype, _value) == (qname, rtype, value):
# Drop this record.
made_change = True
continue
if value == None and (_qname, _rtype) == (qname, rtype):
# Drop all qname-rtype records.
made_change = True
continue
else:
raise ValueError("Invalid action: " + action)
# Preserve this record.
newconfig.append((_qname, _rtype, _value))
if action in ("add", "set") and needs_add and value is not None:
newconfig.append((qname, rtype, value))
made_change = True
if made_change:
# serialize & save
write_custom_dns_config(newconfig, env)
return made_change
########################################################################
def get_secondary_dns(custom_dns, mode=None):
resolver = dns.resolver.get_default_resolver()
resolver.timeout = 10
values = []
for qname, rtype, value in custom_dns:
if qname != '_secondary_nameserver': continue
for hostname in value.split(" "):
hostname = hostname.strip()
if mode == None:
# Just return the setting.
values.append(hostname)
continue
# This is a hostname. Before including in zone xfr lines,
# resolve to an IP address. Otherwise just return the hostname.
# It may not resolve to IPv6, so don't throw an exception if it
# doesn't.
if not hostname.startswith("xfr:"):
if mode == "xfr":
response = dns.resolver.resolve(hostname+'.', "A", raise_on_no_answer=False)
values.extend(map(str, response))
response = dns.resolver.resolve(hostname+'.', "AAAA", raise_on_no_answer=False)
values.extend(map(str, response))
continue
values.append(hostname)
# This is a zone-xfer-only IP address. Do not return if
# we're querying for NS record hostnames. Only return if
# we're querying for zone xfer IP addresses - return the
# IP address.
elif mode == "xfr":
values.append(hostname[4:])
return values
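# Example: if custom.yaml holds
#   _secondary_nameserver: "ns2.example.com xfr:203.0.113.0/24"
# then mode=None returns ["ns2.example.com", "xfr:203.0.113.0/24"] unchanged,
# while mode="xfr" resolves ns2.example.com to its A/AAAA addresses and
# returns those plus "203.0.113.0/24" with the xfr: prefix stripped.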
def set_secondary_dns(hostnames, env):
if len(hostnames) > 0:
# Validate that all hostnames are valid and that all zone-xfer IP addresses are valid.
resolver = dns.resolver.get_default_resolver()
resolver.timeout = 5
for item in hostnames:
if not item.startswith("xfr:"):
# Resolve hostname.
try:
response = resolver.resolve(item, "A")
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
try:
response = resolver.resolve(item, "AAAA")
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
raise ValueError("Could not resolve the IP address of %s." % item)
else:
# Validate IP address.
try:
if "/" in item[4:]:
v = ipaddress.ip_network(item[4:]) # raises a ValueError if there's a problem
else:
v = ipaddress.ip_address(item[4:]) # raises a ValueError if there's a problem
except ValueError:
raise ValueError("'%s' is not an IPv4 or IPv6 address or subnet." % item[4:])
# Set.
set_custom_dns_record("_secondary_nameserver", "A", " ".join(hostnames), "set", env)
else:
# Clear.
set_custom_dns_record("_secondary_nameserver", "A", None, "set", env)
# Apply.
return do_dns_update(env)
def get_custom_dns_records(custom_dns, qname, rtype):
for qname1, rtype1, value in custom_dns:
if qname1 == qname and rtype1 == rtype:
yield value
return None
########################################################################
def build_recommended_dns(env):
ret = []
for (domain, zonefile, records) in build_zones(env):
# remove records that we don't display
records = [r for r in records if r[3] is not False]
# put Required at the top, then Recommended, then everything else
records.sort(key = lambda r : 0 if r[3].startswith("Required.") else (1 if r[3].startswith("Recommended.") else 2))
# expand qnames
for i in range(len(records)):
if records[i][0] == None:
qname = domain
else:
qname = records[i][0] + "." + domain
records[i] = {
"qname": qname,
"rtype": records[i][1],
"value": records[i][2],
"explanation": records[i][3],
}
# return
ret.append((domain, records))
return ret
if __name__ == "__main__":
from utils import load_environment
env = load_environment()
if sys.argv[-1] == "--lint":
write_custom_dns_config(get_custom_dns_config(env), env)
else:
for zone, records in build_recommended_dns(env):
for record in records:
print("; " + record['explanation'])
print(record['qname'], record['rtype'], record['value'], sep="\t")
print()
|
cc0-1.0
|
5089a4b6c97af446d26ec3b31e59c3b1
| 41.412455
| 531
| 0.683059
| 3.293363
| false
| false
| false
| false
|
mcedit/pymclevel
|
mce.py
|
2
|
47597
|
#!/usr/bin/env python
import mclevelbase
import mclevel
import materials
import infiniteworld
import sys
import os
from box import BoundingBox, Vector
import numpy
from numpy import zeros, bincount
import logging
import itertools
import traceback
import shlex
import operator
import codecs
from math import floor
try:
import readline # if available, used by raw_input()
except:
pass
class UsageError(RuntimeError):
pass
class BlockMatchError(RuntimeError):
pass
class PlayerNotFound(RuntimeError):
pass
class mce(object):
"""
Block commands:
{commandPrefix}clone <sourceBox> <destPoint> [noair] [nowater]
{commandPrefix}fill <blockType> [ <box> ]
{commandPrefix}replace <blockType> [with] <newBlockType> [ <box> ]
{commandPrefix}export <filename> <sourceBox>
{commandPrefix}import <filename> <destPoint> [noair] [nowater]
{commandPrefix}createChest <point> <item> [ <count> ]
{commandPrefix}analyze
Player commands:
{commandPrefix}player [ <player> [ <point> ] ]
{commandPrefix}spawn [ <point> ]
Entity commands:
{commandPrefix}removeEntities [ <EntityID> ]
{commandPrefix}dumpSigns [ <filename> ]
{commandPrefix}dumpChests [ <filename> ]
Chunk commands:
{commandPrefix}createChunks <box>
{commandPrefix}deleteChunks <box>
{commandPrefix}prune <box>
{commandPrefix}relight [ <box> ]
World commands:
{commandPrefix}create <filename>
{commandPrefix}dimension [ <dim> ]
{commandPrefix}degrief
{commandPrefix}time [ <time> ]
{commandPrefix}worldsize
{commandPrefix}heightmap <filename>
{commandPrefix}randomseed [ <seed> ]
{commandPrefix}gametype [ <player> [ <gametype> ] ]
Editor commands:
{commandPrefix}save
{commandPrefix}reload
{commandPrefix}load <filename> | <world number>
{commandPrefix}execute <filename>
{commandPrefix}quit
Informational:
{commandPrefix}blocks [ <block name> | <block ID> ]
{commandPrefix}help [ <command> ]
**IMPORTANT**
{commandPrefix}box
Type 'box' to learn how to specify points and areas.
"""
random_seed = os.getenv('MCE_RANDOM_SEED', None)
last_played = os.getenv("MCE_LAST_PLAYED", None)
def commandUsage(self, command):
" returns usage info for the named command - just give the docstring for the handler func "
func = getattr(self, "_" + command)
return func.__doc__
commands = [
"clone",
"fill",
"replace",
"export",
"execute",
"import",
"createchest",
"player",
"spawn",
"removeentities",
"dumpsigns",
"dumpchests",
"createchunks",
"deletechunks",
"prune",
"relight",
"create",
"degrief",
"time",
"worldsize",
"heightmap",
"randomseed",
"gametype",
"save",
"load",
"reload",
"dimension",
"repair",
"quit",
"exit",
"help",
"blocks",
"analyze",
"region",
"debug",
"log",
"box",
]
debug = False
needsSave = False
def readInt(self, command):
try:
val = int(command.pop(0))
except ValueError:
raise UsageError("Cannot understand numeric input")
return val
def prettySplit(self, command):
cmdstring = " ".join(command)
lex = shlex.shlex(cmdstring)
lex.whitespace_split = True
lex.whitespace += "(),"
command[:] = list(lex)
def readBox(self, command):
self.prettySplit(command)
sourcePoint = self.readIntPoint(command)
if command[0].lower() == "to":
command.pop(0)
sourcePoint2 = self.readIntPoint(command)
sourceSize = sourcePoint2 - sourcePoint
else:
sourceSize = self.readIntPoint(command, isPoint=False)
if len([p for p in sourceSize if p <= 0]):
raise UsageError("Box size cannot be zero or negative")
box = BoundingBox(sourcePoint, sourceSize)
return box
def readIntPoint(self, command, isPoint=True):
point = self.readPoint(command, isPoint)
point = map(int, map(floor, point))
return Vector(*point)
def readPoint(self, command, isPoint=True):
self.prettySplit(command)
try:
word = command.pop(0)
if isPoint and (word in self.level.players):
x, y, z = self.level.getPlayerPosition(word)
if len(command) and command[0].lower() == "delta":
command.pop(0)
try:
x += int(command.pop(0))
y += int(command.pop(0))
z += int(command.pop(0))
except ValueError:
raise UsageError("Error decoding point input (expected a number).")
return x, y, z
except IndexError:
raise UsageError("Error decoding point input (expected more values).")
try:
try:
x = float(word)
except ValueError:
if isPoint:
raise PlayerNotFound(word)
raise
y = float(command.pop(0))
z = float(command.pop(0))
except ValueError:
raise UsageError("Error decoding point input (expected a number).")
except IndexError:
raise UsageError("Error decoding point input (expected more values).")
return x, y, z
def readBlockInfo(self, command):
keyword = command.pop(0)
matches = self.level.materials.blocksMatching(keyword)
blockInfo = None
if len(matches):
if len(matches) == 1:
blockInfo = matches[0]
# eat up more words that possibly specify a block. stop eating when 0 matching blocks.
while len(command):
newMatches = self.level.materials.blocksMatching(keyword + " " + command[0])
if len(newMatches) == 1:
blockInfo = newMatches[0]
if len(newMatches) > 0:
matches = newMatches
keyword = keyword + " " + command.pop(0)
else:
break
else:
try:
data = 0
if ":" in keyword:
blockID, data = map(int, keyword.split(":"))
else:
blockID = int(keyword)
blockInfo = self.level.materials.blockWithID(blockID, data)
except ValueError:
blockInfo = None
if blockInfo is None:
print "Ambiguous block specifier: ", keyword
if len(matches):
print "Matches: "
for m in matches:
if m == self.level.materials.defaultName:
continue
print "{0:3}:{1:<2} : {2}".format(m.ID, m.blockData, m.name)
else:
print "No blocks matched."
raise BlockMatchError
return blockInfo
def readBlocksToCopy(self, command):
blocksToCopy = range(materials.id_limit)
while len(command):
word = command.pop()
if word == "noair":
blocksToCopy.remove(0)
if word == "nowater":
blocksToCopy.remove(8)
blocksToCopy.remove(9)
return blocksToCopy
def _box(self, command):
"""
Boxes:
Many commands require a <box> as arguments. A box can be specified with
a point and a size:
(12, 5, 15), (5, 5, 5)
or with two points, making sure to put the keyword "to" between them:
(12, 5, 15) to (17, 10, 20)
The commas and parentheses are not important.
You may add them for improved readability.
Points:
Points and sizes are triplets of numbers ordered X Y Z.
X is position north-south, increasing southward.
Y is position up-down, increasing upward.
Z is position east-west, increasing westward.
Players:
A player's name can be used as a point - it will use the
position of the player's head. Use the keyword 'delta' after
the name to specify a point near the player.
Example:
codewarrior delta 0 5 0
This refers to a point 5 blocks above codewarrior's head.
"""
raise UsageError
def _debug(self, command):
self.debug = not self.debug
print "Debug", ("disabled", "enabled")[self.debug]
def _log(self, command):
"""
log [ <number> ]
Get or set the log threshold. 0 logs everything; 50 only logs major errors.
"""
if len(command):
try:
logging.getLogger().level = int(command[0])
except ValueError:
raise UsageError("Cannot understand numeric input.")
else:
print "Log level: {0}".format(logging.getLogger().level)
def _clone(self, command):
"""
clone <sourceBox> <destPoint> [noair] [nowater]
Clone blocks in a cuboid starting at sourcePoint and extending for
sourceSize blocks in each direction. Blocks and entities in the area
are cloned at destPoint.
"""
if len(command) == 0:
self.printUsage("clone")
return
box = self.readBox(command)
destPoint = self.readPoint(command)
destPoint = map(int, map(floor, destPoint))
blocksToCopy = self.readBlocksToCopy(command)
tempSchematic = self.level.extractSchematic(box)
self.level.copyBlocksFrom(tempSchematic, BoundingBox((0, 0, 0), box.origin), destPoint, blocksToCopy)
self.needsSave = True
print "Cloned 0 blocks."
def _fill(self, command):
"""
fill <blockType> [ <box> ]
Fill blocks with blockType in a cuboid starting at point and
extending for size blocks in each direction. Without a
destination, fills the whole world. blockType may be a
number from 0-255 or a name listed by the 'blocks' command.
"""
if len(command) == 0:
self.printUsage("fill")
return
blockInfo = self.readBlockInfo(command)
if len(command):
box = self.readBox(command)
else:
box = None
print "Filling with {0}".format(blockInfo.name)
self.level.fillBlocks(box, blockInfo)
self.needsSave = True
print "Filled {0} blocks.".format("all" if box is None else box.volume)
def _replace(self, command):
"""
replace <blockType> [with] <newBlockType> [ <box> ]
Replace all blockType blocks with newBlockType in a cuboid
starting at point and extending for size blocks in
each direction. Without a destination, replaces blocks over
the whole world. blockType and newBlockType may be numbers
from 0-255 or names listed by the 'blocks' command.
"""
if len(command) == 0:
self.printUsage("replace")
return
blockInfo = self.readBlockInfo(command)
if command[0].lower() == "with":
command.pop(0)
newBlockInfo = self.readBlockInfo(command)
if len(command):
box = self.readBox(command)
else:
box = None
print "Replacing {0} with {1}".format(blockInfo.name, newBlockInfo.name)
self.level.fillBlocks(box, newBlockInfo, blocksToReplace=[blockInfo])
self.needsSave = True
print "Done."
def _createchest(self, command):
"""
createChest <point> <item> [ <count> ]
Create a chest filled with the specified item.
Stacks are 64 if count is not given.
"""
point = map(lambda x: int(floor(float(x))), self.readPoint(command))
itemID = self.readInt(command)
count = 64
if len(command):
count = self.readInt(command)
chest = mclevel.MCSchematic.chestWithItemID(itemID, count)
self.level.copyBlocksFrom(chest, chest.bounds, point)
self.needsSave = True
def _analyze(self, command):
"""
analyze
Counts all of the block types in every chunk of the world.
"""
blockCounts = zeros((65536,), 'uint64')
sizeOnDisk = 0
print "Analyzing {0} chunks...".format(self.level.chunkCount)
# for input to bincount, create an array of uint16s by
# shifting the data left and adding the blocks
for i, cPos in enumerate(self.level.allChunks, 1):
ch = self.level.getChunk(*cPos)
btypes = numpy.array(ch.Data.ravel(), dtype='uint16')
btypes <<= 12
btypes += ch.Blocks.ravel()
counts = bincount(btypes)
blockCounts[:counts.shape[0]] += counts
if i % 100 == 0:
logging.info("Chunk {0}...".format(i))
for blockID in range(materials.id_limit):
block = self.level.materials.blockWithID(blockID, 0)
if block.hasVariants:
for data in range(16):
i = (data << 12) + blockID
if blockCounts[i]:
idstring = "({id}:{data})".format(id=blockID, data=data)
print "{idstring:9} {name:30}: {count:<10}".format(
idstring=idstring, name=self.level.materials.blockWithID(blockID, data).name, count=blockCounts[i])
else:
count = int(sum(blockCounts[(d << 12) + blockID] for d in range(16)))
if count:
idstring = "({id})".format(id=blockID)
print "{idstring:9} {name:30}: {count:<10}".format(
idstring=idstring, name=self.level.materials.blockWithID(blockID, 0).name, count=count)
self.needsSave = True
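# Worked example of the (data << 12) + blockID packing used above: wool is
# block ID 35, so red wool (data value 14) is counted in bucket
# (14 << 12) + 35 = 57379, while plain stone (ID 1, data 0) lands in bucket 1.
# bincount over these 16-bit values therefore tallies every ID:data
# combination separately.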
def _export(self, command):
"""
export <filename> <sourceBox>
Exports blocks in the specified region to a file in schematic format.
This file can be imported with mce or MCEdit.
"""
if len(command) == 0:
self.printUsage("export")
return
filename = command.pop(0)
box = self.readBox(command)
tempSchematic = self.level.extractSchematic(box)
tempSchematic.saveToFile(filename)
print "Exported {0} blocks.".format(tempSchematic.bounds.volume)
def _import(self, command):
"""
import <filename> <destPoint> [noair] [nowater]
Imports a level or schematic into this world, beginning at destPoint.
Supported formats include
- Alpha single or multiplayer world folder containing level.dat,
- Zipfile containing Alpha world folder,
- Classic single-player .mine,
- Classic multiplayer server_level.dat,
- Indev .mclevel
- Schematic from RedstoneSim, MCEdit, mce
- .inv from INVEdit (appears as a chest)
"""
if len(command) == 0:
self.printUsage("import")
return
filename = command.pop(0)
destPoint = self.readPoint(command)
blocksToCopy = self.readBlocksToCopy(command)
importLevel = mclevel.fromFile(filename)
self.level.copyBlocksFrom(importLevel, importLevel.bounds, destPoint, blocksToCopy, create=True)
self.needsSave = True
print "Imported {0} blocks.".format(importLevel.bounds.volume)
def _player(self, command):
"""
player [ <player> [ <point> ] ]
Move the named player to the specified point.
Without a point, prints the named player's position.
Without a player, prints all players and positions.
In a single-player world, the player is named Player.
"""
if len(command) == 0:
print "Players: "
for player in self.level.players:
print " {0}: {1}".format(player, self.level.getPlayerPosition(player))
return
player = command.pop(0)
if len(command) == 0:
print "Player {0}: {1}".format(player, self.level.getPlayerPosition(player))
return
point = self.readPoint(command)
self.level.setPlayerPosition(point, player)
self.needsSave = True
print "Moved player {0} to {1}".format(player, point)
def _spawn(self, command):
"""
spawn [ <point> ]
Move the world's spawn point.
Without a point, prints the world's spawn point.
"""
if len(command):
point = self.readPoint(command)
point = map(int, map(floor, point))
self.level.setPlayerSpawnPosition(point)
self.needsSave = True
print "Moved spawn point to ", point
else:
print "Spawn point: ", self.level.playerSpawnPosition()
def _dumpsigns(self, command):
"""
dumpSigns [ <filename> ]
Saves the text and location of every sign in the world to a text file.
With no filename, saves signs to <worldname>.signs
Output is newline-delimited. 5 lines per sign. Coordinates are
on the first line, followed by four lines of sign text. For example:
[229, 118, -15]
"To boldly go
where no man
has gone
before."
Coordinates are ordered the same as point inputs:
[North/South, Down/Up, East/West]
"""
if len(command):
filename = command[0]
else:
filename = self.level.displayName + ".signs"
# We happen to encode the output file in UTF-8 too, although
# we could use another UTF encoding. The '-sig' encoding puts
# a signature at the start of the output file that tools such
# as Microsoft Windows Notepad and Emacs understand to mean
# the file has UTF-8 encoding.
outFile = codecs.open(filename, "w", encoding='utf-8-sig')
print "Dumping signs..."
signCount = 0
for i, cPos in enumerate(self.level.allChunks):
try:
chunk = self.level.getChunk(*cPos)
except mclevelbase.ChunkMalformed:
continue
for tileEntity in chunk.TileEntities:
if tileEntity["id"].value == "Sign":
signCount += 1
outFile.write(str(map(lambda x: tileEntity[x].value, "xyz")) + "\n")
for line in range(4):
signText = tileEntity["Text{0}".format(line + 1)].value
outFile.write(signText + u"\n")
if i % 100 == 0:
print "Chunk {0}...".format(i)
print "Dumped {0} signs to {1}".format(signCount, filename)
outFile.close()
def _region(self, command):
"""
region [rx rz]
List region files in this world.
"""
level = self.level
assert(isinstance(level, mclevel.MCInfdevOldLevel))
assert level.version
def getFreeSectors(rf):
runs = []
start = None
count = 0
for i, free in enumerate(rf.freeSectors):
if free:
if start is None:
start = i
count = 1
else:
count += 1
else:
if start is None:
pass
else:
runs.append((start, count))
start = None
count = 0
return runs
def printFreeSectors(runs):
for i, (start, count) in enumerate(runs):
if i % 4 == 3:
print ""
print "{start:>6}+{count:<4}".format(**locals()),
print ""
if len(command):
if len(command) > 1:
rx, rz = map(int, command[:2])
print "Calling allChunks to preload region files: %d chunks" % len(level.allChunks)
rf = level.regionFiles.get((rx, rz))
if rf is None:
print "Region {rx},{rz} not found.".format(**locals())
return
print "Region {rx:6}, {rz:6}: {used}/{sectors} sectors".format(used=rf.usedSectors, sectors=rf.sectorCount)
print "Offset Table:"
for cx in range(32):
for cz in range(32):
if cz % 4 == 0:
print ""
print "{0:3}, {1:3}: ".format(cx, cz),
off = rf.getOffset(cx, cz)
sector, length = off >> 8, off & 0xff
print "{sector:>6}+{length:<2} ".format(**locals()),
print ""
runs = getFreeSectors(rf)
if len(runs):
print "Free sectors:",
printFreeSectors(runs)
else:
if command[0] == "free":
print "Calling allChunks to preload region files: %d chunks" % len(level.allChunks)
for (rx, rz), rf in level.regionFiles.iteritems():
runs = getFreeSectors(rf)
if len(runs):
print "R {0:3}, {1:3}:".format(rx, rz),
printFreeSectors(runs)
else:
print "Calling allChunks to preload region files: %d chunks" % len(level.allChunks)
coords = (r for r in level.regionFiles)
for i, (rx, rz) in enumerate(coords):
print "({rx:6}, {rz:6}): {count}, ".format(count=level.regionFiles[rx, rz].chunkCount),
if i % 5 == 4:
print ""
def _repair(self, command):
"""
repair
Attempt to repair inconsistent region files.
MAKE A BACKUP. WILL DELETE YOUR DATA.
Scans for and repairs errors in region files:
Deletes chunks whose sectors overlap with another chunk
Rearranges chunks that are in the wrong slot in the offset table
Deletes completely unreadable chunks
Only usable with region-format saves.
"""
if self.level.version:
self.level.preloadRegions()
for rf in self.level.regionFiles.itervalues():
rf.repair()
def _dumpchests(self, command):
"""
dumpChests [ <filename> ]
Saves the content and location of every chest in the world to a text file.
With no filename, saves chests to <worldname>.chests
Output is delimited by brackets and newlines. A set of coordinates in
brackets begins a chest, followed by a line for each inventory slot.
For example:
[222, 51, 22]
2 String
3 String
3 Iron bar
Coordinates are ordered the same as point inputs:
[North/South, Down/Up, East/West]
"""
from items import items
if len(command):
filename = command[0]
else:
filename = self.level.displayName + ".chests"
outFile = file(filename, "w")
print "Dumping chests..."
chestCount = 0
for i, cPos in enumerate(self.level.allChunks):
try:
chunk = self.level.getChunk(*cPos)
except mclevelbase.ChunkMalformed:
continue
for tileEntity in chunk.TileEntities:
if tileEntity["id"].value == "Chest":
chestCount += 1
outFile.write(str(map(lambda x: tileEntity[x].value, "xyz")) + "\n")
itemsTag = tileEntity["Items"]
if len(itemsTag):
for itemTag in itemsTag:
try:
id = itemTag["id"].value
damage = itemTag["Damage"].value
item = items.findItem(id, damage)
itemname = item.name
except KeyError:
itemname = "Unknown Item {0}".format(itemTag)
except Exception, e:
itemname = repr(e)
outFile.write("{0} {1}\n".format(itemTag["Count"].value, itemname))
else:
outFile.write("Empty Chest\n")
if i % 100 == 0:
print "Chunk {0}...".format(i)
print "Dumped {0} chests to {1}".format(chestCount, filename)
outFile.close()
def _removeentities(self, command):
"""
removeEntities [ [except] [ <EntityID> [ <EntityID> ... ] ] ]
Remove all entities matching one or more entity IDs.
With the except keyword, removes all entities not
matching one or more entity IDs.
Without any IDs, removes all entities in the world,
except for Paintings.
Known Mob Entity IDs:
Mob Monster Creeper Skeleton Spider Giant
Zombie Slime Pig Sheep Cow Chicken
Known Item Entity IDs: Item Arrow Snowball Painting
Known Vehicle Entity IDs: Minecart Boat
Known Dynamic Tile Entity IDs: PrimedTnt FallingSand
"""
ENT_MATCHTYPE_ANY = 0
ENT_MATCHTYPE_EXCEPT = 1
ENT_MATCHTYPE_NONPAINTING = 2
def match(entityID, matchType, matchWords):
if ENT_MATCHTYPE_ANY == matchType:
return entityID.lower() in matchWords
elif ENT_MATCHTYPE_EXCEPT == matchType:
return not (entityID.lower() in matchWords)
else:
# ENT_MATCHTYPE_NONPAINTING == matchType
return entityID != "Painting"
removedEntities = {}
match_words = []
if len(command):
if command[0].lower() == "except":
command.pop(0)
print "Removing all entities except ", command
match_type = ENT_MATCHTYPE_EXCEPT
else:
print "Removing {0}...".format(", ".join(command))
match_type = ENT_MATCHTYPE_ANY
match_words = map(lambda x: x.lower(), command)
else:
print "Removing all entities except Painting..."
match_type = ENT_MATCHTYPE_NONPAINTING
for cx, cz in self.level.allChunks:
chunk = self.level.getChunk(cx, cz)
entitiesRemoved = 0
for entity in list(chunk.Entities):
entityID = entity["id"].value
if match(entityID, match_type, match_words):
removedEntities[entityID] = removedEntities.get(entityID, 0) + 1
chunk.Entities.remove(entity)
entitiesRemoved += 1
if entitiesRemoved:
chunk.chunkChanged(False)
if len(removedEntities) == 0:
print "No entities to remove."
else:
print "Removed entities:"
for entityID in sorted(removedEntities.keys()):
print " {0}: {1:6}".format(entityID, removedEntities[entityID])
self.needsSave = True
def _createchunks(self, command):
"""
createChunks <box>
Creates any chunks not present in the specified region.
New chunks are filled with only air. New chunks are written
to disk immediately.
"""
if len(command) == 0:
self.printUsage("createchunks")
return
box = self.readBox(command)
chunksCreated = self.level.createChunksInBox(box)
print "Created {0} chunks." .format(len(chunksCreated))
self.needsSave = True
def _deletechunks(self, command):
"""
deleteChunks <box>
Removes all chunks contained in the specified region.
Chunks are deleted from disk immediately.
"""
if len(command) == 0:
self.printUsage("deletechunks")
return
box = self.readBox(command)
deletedChunks = self.level.deleteChunksInBox(box)
print "Deleted {0} chunks." .format(len(deletedChunks))
def _prune(self, command):
"""
prune <box>
Removes all chunks not contained in the specified region. Useful for enforcing a finite map size.
Chunks are deleted from disk immediately.
"""
if len(command) == 0:
self.printUsage("prune")
return
box = self.readBox(command)
i = 0
for cx, cz in list(self.level.allChunks):
if cx < box.mincx or cx >= box.maxcx or cz < box.mincz or cz >= box.maxcz:
self.level.deleteChunk(cx, cz)
i += 1
print "Pruned {0} chunks." .format(i)
def _relight(self, command):
"""
relight [ <box> ]
Recalculates lights in the region specified. If omitted,
recalculates the entire world.
"""
if len(command):
box = self.readBox(command)
chunks = itertools.product(range(box.mincx, box.maxcx), range(box.mincz, box.maxcz))
else:
chunks = self.level.allChunks
self.level.generateLights(chunks)
print "Relit 0 chunks."
self.needsSave = True
def _create(self, command):
"""
create [ <filename> ]
Create and load a new Minecraft Alpha world. This world will have no
chunks and a random terrain seed. If run from the shell, filename is not
needed because you already specified a filename earlier in the command.
For example:
mce.py MyWorld create
"""
if len(command) < 1:
raise UsageError("Expected a filename")
filename = command[0]
if not os.path.exists(filename):
os.mkdir(filename)
if not os.path.isdir(filename):
raise IOError("{0} already exists".format(filename))
if mclevel.MCInfdevOldLevel.isLevel(filename):
raise IOError("{0} is already a Minecraft Alpha world".format(filename))
level = mclevel.MCInfdevOldLevel(filename, create=True)
self.level = level
def _degrief(self, command):
"""
degrief [ <height> ]
Reverse a few forms of griefing by removing
Adminium, Obsidian, Fire, and Lava wherever
they occur above the specified height.
Without a height, uses height level 32.
Removes natural surface lava.
Also see removeEntities
"""
box = self.level.bounds
box = BoundingBox(box.origin + (0, 32, 0), box.size - (0, 32, 0))
if len(command):
try:
# BoundingBox exposes miny as a read-only property, so rebuild the box
# with the requested floor height instead of assigning to it.
miny = int(command[0])
box = BoundingBox((box.minx, miny, box.minz), (box.maxx - box.minx, box.maxy - miny, box.maxz - box.minz))
except ValueError:
pass
print "Removing grief matter and surface lava above height {0}...".format(box.miny)
self.level.fillBlocks(box,
self.level.materials.Air,
blocksToReplace=[self.level.materials.Bedrock,
self.level.materials.Obsidian,
self.level.materials.Fire,
self.level.materials.LavaActive,
self.level.materials.Lava,
]
)
self.needsSave = True
def _time(self, command):
"""
time [time of day]
Set or display the time of day. Acceptable values are "morning", "noon",
"evening", "midnight", or a time of day such as 8:02, 12:30 PM, or 16:45.
"""
ticks = self.level.Time
timeOfDay = ticks % 24000
ageInTicks = ticks - timeOfDay
if len(command) == 0:
days = ageInTicks / 24000
hours = timeOfDay / 1000
clockHours = (hours + 6) % 24
ampm = ("AM", "PM")[clockHours > 11]
minutes = (timeOfDay % 1000) * 60 / 1000  # 1000 ticks per in-game hour
print "It is {0}:{1:02} {2} on Day {3}".format(clockHours % 12 or 12, minutes, ampm, days)
else:
times = {"morning": 6, "noon": 12, "evening": 18, "midnight": 24}
word = command[0]
minutes = 0
if word in times:
hours = times[word]
else:
try:
if ":" in word:
h, m = word.split(":")
hours = int(h)
minutes = int(m)
else:
hours = int(word)
except Exception, e:
raise UsageError(("Cannot interpret time, ", e))
if len(command) > 1:
if command[1].lower() == "pm":
hours += 12
ticks = ageInTicks + hours * 1000 + minutes * 1000 / 60 - 6000
if ticks < 0:
ticks += 18000
ampm = ("AM", "PM")[hours > 11 and hours < 24]
print "Changed time to {0}:{1:02} {2}".format(hours % 12 or 12, minutes, ampm)
self.level.Time = ticks
self.needsSave = True
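# Worked example of the tick arithmetic above: a Minecraft day is 24000 ticks
# and tick 0 of the day corresponds to 6:00 AM, hence the +6 hour / -6000
# tick offsets. Setting "8:30 pm" parses to hours=20, minutes=30, giving a
# time of day of 20*1000 + 30*1000/60 - 6000 = 14500 ticks; reading it back,
# 14500/1000 = 14, (14+6) % 24 = 20 (8 PM), and 500*60/1000 = 30 minutes.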
def _randomseed(self, command):
"""
randomseed [ <seed> ]
Set or display the world's random seed, a 64-bit integer that uniquely
defines the world's terrain.
"""
if len(command):
try:
seed = long(command[0])
except ValueError:
raise UsageError("Expected a long integer.")
self.level.RandomSeed = seed
self.needsSave = True
else:
print "Random Seed: ", self.level.RandomSeed
def _gametype(self, command):
"""
gametype [ <player> [ <gametype> ] ]
Set or display the player's game type, an integer that identifies whether
their game is survival (0) or creative (1). On single-player worlds, the
player is just 'Player'.
"""
if len(command) == 0:
print "Players: "
for player in self.level.players:
print " {0}: {1}".format(player, self.level.getPlayerGameType(player))
return
player = command.pop(0)
if len(command) == 0:
print "Player {0}: {1}".format(player, self.level.getPlayerGameType(player))
return
try:
gametype = int(command[0])
except ValueError:
raise UsageError("Expected an integer.")
self.level.setPlayerGameType(gametype, player)
self.needsSave = True
def _worldsize(self, command):
"""
worldsize
Computes and prints the dimensions of the world. For infinite worlds,
also prints the most negative corner.
"""
bounds = self.level.bounds
if isinstance(self.level, mclevel.MCInfdevOldLevel):
print "\nWorld size: \n {0[0]:7} north to south\n {0[2]:7} east to west\n".format(bounds.size)
print "Smallest and largest points: ({0[0]},{0[2]}), ({1[0]},{1[2]})".format(bounds.origin, bounds.maximum)
else:
print "\nWorld size: \n {0[0]:7} wide\n {0[1]:7} tall\n {0[2]:7} long\n".format(bounds.size)
def _heightmap(self, command):
"""
heightmap <filename>
Takes a png and imports it as the terrain starting at chunk 0,0.
Data is internally converted to greyscale and scaled to the maximum height.
The game will fill the terrain with trees and mineral deposits the next
time you play the level.
Please please please try out a small test image before using a big source.
Using the levels tool to get a good heightmap is an art, not a science.
A smaller map lets you experiment and get it right before having to blow
all night generating the really big map.
Requires the PIL library.
"""
if len(command) == 0:
self.printUsage("heightmap")
return
if not sys.stdin.isatty() or raw_input(
"This will destroy a large portion of the map and may take a long time. Did you really want to do this?"
).lower() in ("yes", "y", "1", "true"):
from PIL import Image
import datetime
filename = command.pop(0)
imgobj = Image.open(filename)
greyimg = imgobj.convert("L") # luminance
del imgobj
width, height = greyimg.size
water_level = 64
xchunks = (height + 15) / 16
zchunks = (width + 15) / 16
start = datetime.datetime.now()
for cx in range(xchunks):
for cz in range(zchunks):
try:
self.level.createChunk(cx, cz)
except:
pass
c = self.level.getChunk(cx, cz)
imgarray = numpy.asarray(greyimg.crop((cz * 16, cx * 16, cz * 16 + 16, cx * 16 + 16)))
imgarray = imgarray / 2 # scale to 0-127
for x in range(16):
for z in range(16):
if z + (cz * 16) < width - 1 and x + (cx * 16) < height - 1:
# world dimension X goes north-south
# first array axis goes up-down
h = imgarray[x, z]
c.Blocks[x, z, h + 1:] = 0 # air
c.Blocks[x, z, h:h + 1] = 2 # grass
c.Blocks[x, z, h - 4:h] = 3 # dirt
c.Blocks[x, z, :h - 4] = 1 # rock
if h < water_level:
c.Blocks[x, z, h + 1:water_level] = 9 # water
if h < water_level + 2:
c.Blocks[x, z, h - 2:h + 1] = 12 # sand if it's near water level
c.Blocks[x, z, 0] = 7 # bedrock
c.chunkChanged()
c.TerrainPopulated = False
# the quick lighting from chunkChanged has already lit this simple terrain completely
c.needsLighting = False
logging.info("%s Just did chunk %d,%d" % (datetime.datetime.now().strftime("[%H:%M:%S]"), cx, cz))
logging.info("Done with mapping!")
self.needsSave = True
stop = datetime.datetime.now()
logging.info("Took %s." % str(stop - start))
spawnz = width / 2
spawnx = height / 2
spawny = greyimg.getpixel((spawnx, spawnz))
logging.info("You probably want to change your spawn point. I suggest {0}".format((spawnx, spawny, spawnz)))
def _execute(self, command):
"""
execute <filename>
Execute all commands in a file and save.
"""
if len(command) == 0:
print "You must give the file with commands to execute"
else:
commandFile = open(command[0], "r")
commandsFromFile = commandFile.readlines()
for commandFromFile in commandsFromFile:
print commandFromFile
self.processCommand(commandFromFile)
self._save("")
def _quit(self, command):
"""
quit [ yes | no ]
Quits the program.
Without 'yes' or 'no', prompts to save before quitting.
In batch mode, an end of file automatically saves the level.
"""
if len(command) == 0 or not (command[0].lower() in ("yes", "no")):
if raw_input("Save before exit? ").lower() in ("yes", "y", "1", "true"):
self._save(command)
raise SystemExit
if len(command) and command[0].lower() == "yes":
self._save(command)
raise SystemExit
def _exit(self, command):
self._quit(command)
def _save(self, command):
if self.needsSave:
self.level.generateLights()
self.level.saveInPlace()
self.needsSave = False
def _load(self, command):
"""
load [ <filename> | <world number> ]
Loads another world, discarding all changes to this world.
"""
if len(command) == 0:
self.printUsage("load")
self.loadWorld(command[0])
def _reload(self, command):
self.level = mclevel.fromFile(self.level.filename)
def _dimension(self, command):
"""
dimension [ <dim> ]
Load another dimension, a sub-world of this level. Without options, lists
all of the dimensions found in this world. <dim> can be a number or one of
these keywords:
nether, hell, slip: DIM-1
earth, overworld, parent: parent world
end: DIM1
"""
if len(command):
if command[0].lower() in ("earth", "overworld", "parent"):
if self.level.parentWorld:
self.level = self.level.parentWorld
return
else:
print "You are already on earth."
return
elif command[0].lower() in ("hell", "nether", "slip"):
dimNo = -1
elif command[0].lower() == "end":
dimNo = 1
else:
dimNo = self.readInt(command)
if dimNo in self.level.dimensions:
self.level = self.level.dimensions[dimNo]
return
if self.level.parentWorld:
print u"Parent world: {0} ('dimension parent' to return)".format(self.level.parentWorld.displayName)
if len(self.level.dimensions):
print u"Dimensions in {0}:".format(self.level.displayName)
for k in self.level.dimensions:
print "{0}: {1}".format(k, infiniteworld.MCAlphaDimension.dimensionNames.get(k, "Unknown"))
def _help(self, command):
if len(command):
self.printUsage(command[0])
else:
self.printUsage()
def _blocks(self, command):
"""
blocks [ <block name> | <block ID> ]
Prints block IDs matching the name, or the name matching the ID.
With nothing, prints a list of all blocks.
"""
searchName = None
if len(command):
searchName = " ".join(command)
try:
searchNumber = int(searchName)
except ValueError:
searchNumber = None
matches = self.level.materials.blocksMatching(searchName)
else:
matches = [b for b in self.level.materials.allBlocks if b.ID == searchNumber]
# print "{0:3}: {1}".format(searchNumber, self.level.materials.names[searchNumber])
# return
else:
matches = self.level.materials.allBlocks
print "{id:9} : {name} {aka}".format(id="(ID:data)", name="Block name", aka="[Other names]")
for b in sorted(matches):
idstring = "({ID}:{data})".format(ID=b.ID, data=b.blockData)
aka = b.aka and " [{aka}]".format(aka=b.aka) or ""
print "{idstring:9} : {name} {aka}".format(idstring=idstring, name=b.name, aka=aka)
def printUsage(self, command=""):
if command.lower() in self.commands:
print "Usage: ", self.commandUsage(command.lower())
else:
print self.__doc__.format(commandPrefix=("", "mce.py <world> ")[not self.batchMode])
def printUsageAndQuit(self):
self.printUsage()
raise SystemExit
def loadWorld(self, world):
worldpath = os.path.expanduser(world)
if os.path.exists(worldpath):
self.level = mclevel.fromFile(worldpath)
else:
self.level = mclevel.loadWorld(world)
level = None
batchMode = False
def run(self):
logging.basicConfig(format=u'%(levelname)s:%(message)s')
logging.getLogger().level = logging.INFO
sys.argv.pop(0)
if len(sys.argv):
world = sys.argv.pop(0)
if world.lower() in ("-h", "--help"):
self.printUsageAndQuit()
if len(sys.argv) and sys.argv[0].lower() == "create":
# accept the syntax, "mce world3 create"
self._create([world])
print "Created world {0}".format(world)
sys.exit(0)
else:
self.loadWorld(world)
else:
self.batchMode = True
self.printUsage()
while True:
try:
world = raw_input("Please enter world name or path to world folder: ")
self.loadWorld(world)
except EOFError, e:
print "End of input."
raise SystemExit
except Exception, e:
print "Cannot open {0}: {1}".format(world, e)
else:
break
if len(sys.argv):
# process one command from command line
try:
self.processCommand(" ".join(sys.argv))
except UsageError:
self.printUsageAndQuit()
self._save([])
else:
# process many commands on standard input, maybe interactively
command = [""]
self.batchMode = True
while True:
try:
command = raw_input(u"{0}> ".format(self.level.displayName))
print
self.processCommand(command)
except EOFError, e:
print "End of file. Saving automatically."
self._save([])
raise SystemExit
except Exception, e:
if self.debug:
traceback.print_exc()
print 'Exception during command: {0!r}'.format(e)
print "Use 'debug' to enable tracebacks."
# self.printUsage()
def processCommand(self, command):
command = command.strip()
if len(command) == 0:
return
if command[0] == "#":
return
commandWords = command.split()
keyword = commandWords.pop(0).lower()
if not keyword in self.commands:
matches = filter(lambda x: x.startswith(keyword), self.commands)
if len(matches) == 1:
keyword = matches[0]
elif len(matches):
print "Ambiguous command. Matches: "
for k in matches:
print " ", k
return
else:
raise UsageError("Command {0} not recognized.".format(keyword))
func = getattr(self, "_" + keyword)
try:
func(commandWords)
except PlayerNotFound, e:
print "Cannot find player {0}".format(e.args[0])
self._player([])
except UsageError, e:
print e
if self.debug:
traceback.print_exc()
self.printUsage(keyword)
def main(argv):
profile = os.getenv("MCE_PROFILE", None)
editor = mce()
if profile:
print "Profiling enabled"
import cProfile
cProfile.runctx('editor.run()', locals(), globals(), profile)
else:
editor.run()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
isc
|
7ec8fe21588c28e8ea9ce5995d342c5d
| 30.773698
| 127
| 0.536609
| 4.232349
| false
| false
| false
| false
|
mcedit/pymclevel
|
box.py
|
3
|
6660
|
from collections import namedtuple
import itertools
import math
_Vector = namedtuple("_Vector", ("x", "y", "z"))
class Vector(_Vector):
__slots__ = ()
def __add__(self, other):
return Vector(self[0] + other[0], self[1] + other[1], self[2] + other[2])
def __sub__(self, other):
return Vector(self[0] - other[0], self[1] - other[1], self[2] - other[2])
def __mul__(self, other):
if isinstance(other, (int, float)):
return Vector(self[0] * other, self[1] * other, self[2] * other)
return Vector(self[0] * other[0], self[1] * other[1], self[2] * other[2])
def __truediv__(self, other):
if isinstance(other, (int, float)):
return Vector(self[0] / other, self[1] / other, self[2] / other)
return Vector(self[0] / other[0], self[1] / other[1], self[2] / other[2])
__div__ = __truediv__
def length(self):
return math.sqrt(self[0] * self[0] + self[1] * self[1] + self[2] * self[2])
def normalize(self):
l = self.length()
if l == 0: return self
return self / l
def intfloor(self):
return Vector(*[int(math.floor(p)) for p in self])
class BoundingBox (object):
type = int
def __init__(self, origin=(0, 0, 0), size=(0, 0, 0)):
if isinstance(origin, BoundingBox):
self._origin = origin._origin
self._size = origin._size
else:
self._origin, self._size = Vector(*(self.type(a) for a in origin)), Vector(*(self.type(a) for a in size))
def __repr__(self):
return "BoundingBox({0}, {1})".format(self.origin, self.size)
@property
def origin(self):
"The smallest position in the box"
return self._origin
@property
def size(self):
"The size of the box"
return self._size
@property
def width(self):
"The dimension along the X axis"
return self._size.x
@property
def height(self):
"The dimension along the Y axis"
return self._size.y
@property
def length(self):
"The dimension along the Z axis"
return self._size.z
@property
def minx(self):
return self.origin.x
@property
def miny(self):
return self.origin.y
@property
def minz(self):
return self.origin.z
@property
def maxx(self):
return self.origin.x + self.size.x
@property
def maxy(self):
return self.origin.y + self.size.y
@property
def maxz(self):
return self.origin.z + self.size.z
@property
def maximum(self):
"The largest point of the box; origin plus size."
return self._origin + self._size
@property
def volume(self):
"The volume of the box in blocks"
return self.size.x * self.size.y * self.size.z
@property
def positions(self):
"""iterate through all of the positions within this selection box"""
return itertools.product(
xrange(self.minx, self.maxx),
xrange(self.miny, self.maxy),
xrange(self.minz, self.maxz)
)
def intersect(self, box):
"""
Return a box containing the area self and box have in common. Box will have zero volume
if there is no common area.
"""
if (self.minx > box.maxx or self.maxx < box.minx or
self.miny > box.maxy or self.maxy < box.miny or
self.minz > box.maxz or self.maxz < box.minz):
#Zero size intersection.
return BoundingBox()
origin = Vector(
max(self.minx, box.minx),
max(self.miny, box.miny),
max(self.minz, box.minz),
)
maximum = Vector(
min(self.maxx, box.maxx),
min(self.maxy, box.maxy),
min(self.maxz, box.maxz),
)
#print "Intersect of {0} and {1}: {2}".format(self, box, newbox)
return BoundingBox(origin, maximum - origin)
def union(self, box):
"""
Return a box large enough to contain both self and box.
"""
origin = Vector(
min(self.minx, box.minx),
min(self.miny, box.miny),
min(self.minz, box.minz),
)
maximum = Vector(
max(self.maxx, box.maxx),
max(self.maxy, box.maxy),
max(self.maxz, box.maxz),
)
return BoundingBox(origin, maximum - origin)
def expand(self, dx, dy=None, dz=None):
"""
Return a new box with boundaries expanded by dx, dy, dz.
If only dx is passed, expands by dx in all dimensions.
"""
if dz is None:
dz = dx
if dy is None:
dy = dx
origin = self.origin - (dx, dy, dz)
size = self.size + (dx * 2, dy * 2, dz * 2)
return BoundingBox(origin, size)
def __contains__(self, pos):
x, y, z = pos
if x < self.minx or x >= self.maxx:
return False
if y < self.miny or y >= self.maxy:
return False
if z < self.minz or z >= self.maxz:
return False
return True
def __cmp__(self, b):
return cmp((self.origin, self.size), (b.origin, b.size))
# --- Chunk positions ---
@property
def mincx(self):
"The smallest chunk position contained in this box"
return self.origin.x >> 4
@property
def mincz(self):
"The smallest chunk position contained in this box"
return self.origin.z >> 4
@property
def maxcx(self):
"The largest chunk position contained in this box"
return ((self.origin.x + self.size.x - 1) >> 4) + 1
@property
def maxcz(self):
"The largest chunk position contained in this box"
return ((self.origin.z + self.size.z - 1) >> 4) + 1
def chunkBox(self, level):
"""Returns this box extended to the chunk boundaries of the given level"""
box = self
return BoundingBox((box.mincx << 4, 0, box.mincz << 4),
(box.maxcx - box.mincx << 4, level.Height, box.maxcz - box.mincz << 4))
@property
def chunkPositions(self):
#iterate through all of the chunk positions within this selection box
return itertools.product(xrange(self.mincx, self.maxcx), xrange(self.mincz, self.maxcz))
@property
def chunkCount(self):
return (self.maxcx - self.mincx) * (self.maxcz - self.mincz)
@property
def isChunkAligned(self):
return (self.origin.x & 0xf == 0) and (self.origin.z & 0xf == 0)
class FloatBox (BoundingBox):
type = float
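# A minimal usage sketch (hypothetical coordinates) showing how the box
# helpers above relate block coordinates to chunk coordinates; run this
# module directly to check the assertions.
if __name__ == "__main__":
    b = BoundingBox((10, 0, 20), (40, 128, 8))
    assert b.maximum == Vector(50, 128, 28)
    assert (b.mincx, b.maxcx) == (0, 4)   # 10 >> 4 == 0, ((10 + 40 - 1) >> 4) + 1 == 4
    assert (b.mincz, b.maxcz) == (1, 2)   # 20 >> 4 == 1, ((20 + 8 - 1) >> 4) + 1 == 2
    assert b.intersect(BoundingBox((30, 0, 0), (40, 128, 25))).volume == 20 * 128 * 5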
|
isc
|
86e79510841bc3a5781cfe434696133b
| 27.220339
| 117
| 0.554204
| 3.592233
| false
| false
| false
| false
|
mcedit/mcedit
|
albow/file_dialogs.py
|
1
|
9481
|
# -*- coding: utf-8 -*-
#
# Albow - File Dialogs
#
import os
from pygame import draw, Rect
from pygame.locals import *
from albow.widget import Widget
from albow.dialogs import Dialog, ask, alert
from albow.controls import Label, Button
from albow.fields import TextField
from albow.layout import Row, Column
from albow.palette_view import PaletteView
from albow.theme import ThemeProperty
class DirPathView(Widget):
def __init__(self, width, client, **kwds):
Widget.__init__(self, **kwds)
self.set_size_for_text(width)
self.client = client
def draw(self, surf):
frame = self.get_margin_rect()
image = self.font.render(self.client.directory, True, self.fg_color)
tw = image.get_width()
mw = frame.width
if tw <= mw:
x = 0
else:
x = mw - tw
surf.blit(image, (frame.left + x, frame.top))
class FileListView(PaletteView):
#scroll_button_color = (255, 255, 0)
def __init__(self, width, client, **kwds):
font = self.predict_font(kwds)
h = font.get_linesize()
d = 2 * self.predict(kwds, 'margin')
PaletteView.__init__(self, (width - d, h), 10, 1, scrolling=True, **kwds)
self.client = client
self.selection = None
self.names = []
def update(self):
client = self.client
dir = client.directory
suffixes = client.suffixes
def filter(name):
path = os.path.join(dir, name)
return os.path.isdir(path) or self.client.filter(path)
try:
names = [name for name in os.listdir(dir) if filter(name)]
#if not name.startswith(".") and filter(name)]
except EnvironmentError, e:
alert(u"%s: %s" % (dir, e))
names = []
self.names = sorted(names)
self.selection = None
self.scroll = 0
def num_items(self):
return len(self.names)
#def draw_prehighlight(self, surf, item_no, rect):
# draw.rect(surf, self.sel_color, rect)
def draw_item(self, surf, item_no, rect):
font = self.font
color = self.fg_color
buf = self.font.render(self.names[item_no], True, color)
surf.blit(buf, rect)
def click_item(self, item_no, e):
self.selection = item_no
self.client.dir_box_click(e.num_clicks == 2)
def item_is_selected(self, item_no):
return item_no == self.selection
def get_selected_name(self):
sel = self.selection
if sel is not None:
return self.names[sel]
else:
return ""
class FileDialog(Dialog):
box_width = 250
default_prompt = None
up_button_text = ThemeProperty("up_button_text")
def __init__(self, prompt=None, suffixes=None, **kwds):
Dialog.__init__(self, **kwds)
label = None
d = self.margin
self.suffixes = suffixes or ("",)
up_button = Button(self.up_button_text, action=self.go_up)
dir_box = DirPathView(self.box_width - up_button.width - 10, self)
self.dir_box = dir_box
top_row = Row([dir_box, up_button])
list_box = FileListView(self.box_width - 16, self)
self.list_box = list_box
ctrls = [top_row, list_box]
prompt = prompt or self.default_prompt
if prompt:
label = Label(prompt)
if self.saving:
filename_box = TextField(self.box_width)
filename_box.change_action = self.update
filename_box._enter_action = filename_box.enter_action
filename_box.enter_action = self.enter_action
self.filename_box = filename_box
ctrls.append(Column([label, filename_box], align='l', spacing=0))
else:
if label:
ctrls.insert(0, label)
ok_button = Button(self.ok_label, action=self.ok, enable=self.ok_enable)
self.ok_button = ok_button
cancel_button = Button("Cancel", action=self.cancel)
vbox = Column(ctrls, align='l', spacing=d)
vbox.topleft = (d, d)
y = vbox.bottom + d
ok_button.topleft = (vbox.left, y)
cancel_button.topright = (vbox.right, y)
self.add(vbox)
self.add(ok_button)
self.add(cancel_button)
self.shrink_wrap()
self._directory = None
self.directory = os.getcwdu()
#print "FileDialog: cwd =", repr(self.directory) ###
if self.saving:
filename_box.focus()
def get_directory(self):
return self._directory
def set_directory(self, x):
x = os.path.abspath(x)
while not os.path.exists(x):
y = os.path.dirname(x)
if y == x:
x = os.getcwdu()
break
x = y
if self._directory != x:
self._directory = x
self.list_box.update()
self.update()
directory = property(get_directory, set_directory)
def filter(self, path):
suffixes = self.suffixes
if not suffixes or os.path.isdir(path):
#return os.path.isfile(path)
return True
for suffix in suffixes:
if path.endswith(suffix.lower()):
return True
def update(self):
pass
def go_up(self):
self.directory = os.path.dirname(self.directory)
self.list_box.scroll_to_item(0)
def dir_box_click(self, double):
if double:
name = self.list_box.get_selected_name()
path = os.path.join(self.directory, name)
suffix = os.path.splitext(name)[1]
if suffix not in self.suffixes and os.path.isdir(path):
self.directory = path
else:
self.double_click_file(name)
self.update()
def enter_action(self):
self.filename_box._enter_action()
self.ok()
def ok(self):
self.dir_box_click(True)
#self.dismiss(True)
def cancel(self):
self.dismiss(False)
def key_down(self, evt):
k = evt.key
if k == K_RETURN or k == K_KP_ENTER:
self.dir_box_click(True)
if k == K_ESCAPE:
self.cancel()
class FileSaveDialog(FileDialog):
saving = True
default_prompt = "Save as:"
ok_label = "Save"
def get_filename(self):
return self.filename_box.value
def set_filename(self, x):
dsuf = self.suffixes[0]
if x.endswith(dsuf):
x = x[:-len(dsuf)]
self.filename_box.value = x
filename = property(get_filename, set_filename)
def get_pathname(self):
path = os.path.join(self.directory, self.filename_box.value)
suffixes = self.suffixes
if suffixes and not path.endswith(suffixes[0]):
path = path + suffixes[0]
return path
pathname = property(get_pathname)
def double_click_file(self, name):
self.filename_box.value = name
def ok(self):
path = self.pathname
if os.path.exists(path):
answer = ask("Replace existing '%s'?" % os.path.basename(path))
if answer != "OK":
return
#FileDialog.ok(self)
self.dismiss(True)
def update(self):
FileDialog.update(self)
def ok_enable(self):
return self.filename_box.text != ""
class FileOpenDialog(FileDialog):
saving = False
ok_label = "Open"
def get_pathname(self):
name = self.list_box.get_selected_name()
if name:
return os.path.join(self.directory, name)
else:
return None
pathname = property(get_pathname)
#def update(self):
# FileDialog.update(self)
def ok_enable(self):
path = self.pathname
enabled = self.item_is_choosable(path)
return enabled
def item_is_choosable(self, path):
return bool(path) and self.filter(path)
def double_click_file(self, name):
self.dismiss(True)
class LookForFileDialog(FileOpenDialog):
target = None
def __init__(self, target, **kwds):
FileOpenDialog.__init__(self, **kwds)
self.target = target
def item_is_choosable(self, path):
return path and os.path.basename(path) == self.target
def filter(self, name):
return name and os.path.basename(name) == self.target
def request_new_filename(prompt=None, suffix=None, extra_suffixes=None,
directory=None, filename=None, pathname=None):
if pathname:
directory, filename = os.path.split(pathname)
if extra_suffixes:
suffixes = extra_suffixes
else:
suffixes = []
if suffix:
suffixes = [suffix] + suffixes
dlog = FileSaveDialog(prompt=prompt, suffixes=suffixes)
if directory:
dlog.directory = directory
if filename:
dlog.filename = filename
if dlog.present():
return dlog.pathname
else:
return None
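# Editor's illustrative sketch (not part of the original module): a typical
# call to the helper above. The prompt, suffix and directory are made-up
# examples; the helper returns the chosen path, or None if the user cancels.
# Defined but never called.
def _request_new_filename_example():
    return request_new_filename(prompt="Save schematic as:",
                                suffix=".schematic",
                                directory=".")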
def request_old_filename(suffixes=None, directory=None):
dlog = FileOpenDialog(suffixes=suffixes)
if directory:
dlog.directory = directory
if dlog.present():
return dlog.pathname
else:
return None
def look_for_file_or_directory(target, prompt=None, directory=None):
dlog = LookForFileDialog(target=target, prompt=prompt)
if directory:
dlog.directory = directory
if dlog.present():
return dlog.pathname
else:
return None
|
isc
|
cb8b6206c1453db370a106d742224366
| 27.217262
| 81
| 0.580213
| 3.726808
| false
| false
| false
| false
|
mozilla-services/tecken
|
tecken/upload/views.py
|
1
|
18470
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import re
import logging
import fnmatch
import zipfile
import hashlib
import os
import time
import concurrent.futures
import markus
from encore.concurrent.futures.synchronous import SynchronousExecutor
from django import http
from django.conf import settings
from django.utils import timezone
from django.core.exceptions import ImproperlyConfigured
from django.views.decorators.csrf import csrf_exempt
from tecken.base.decorators import (
api_login_required,
api_any_permission_required,
api_require_POST,
make_tempdir,
)
from tecken.base.utils import filesizeformat, invalid_key_name_characters
from tecken.upload.forms import UploadByDownloadForm, UploadByDownloadRemoteError
from tecken.upload.models import Upload, UploadsCreated
from tecken.upload.utils import (
dump_and_extract,
UnrecognizedArchiveFileExtension,
DuplicateFileDifferentSize,
upload_file_upload,
)
from tecken.librequests import session_with_retries
from tecken.storage import StorageBucket
logger = logging.getLogger("tecken")
metrics = markus.get_metrics("tecken")
class NoPossibleBucketName(Exception):
"""When you tried to specify a preferred bucket name but it never
matched to one you can use."""
_not_hex_characters = re.compile(r"[^a-f0-9]", re.I)
# This list of filenames is used to validate a zip and also when iterating
# over the extracted zip.
# The names of files in this list are considered harmless and something that
# can simply be ignored.
_ignorable_filenames = (".DS_Store",)
def check_symbols_archive_file_listing(file_listings):
"""return a string (the error) if there was something not as expected"""
for file_listing in file_listings:
for snippet in settings.DISALLOWED_SYMBOLS_SNIPPETS:
if snippet in file_listing.name:
return (
f"Content of archive file contains the snippet "
f"'{snippet}' which is not allowed"
)
# Now check that the filename is matching according to these rules:
# 1. Either /<name1>/hex/<name2>,
# 2. Or, /<name>-symbols.txt
        # Anything else should be considered an unrecognized file pattern
# and thus rejected.
split = file_listing.name.split("/")
if split[-1] in _ignorable_filenames:
continue
if len(split) == 3:
# Check the symbol and the filename part of it to make sure
# it doesn't contain any, considered, invalid S3 characters
# when it'd become a key.
if invalid_key_name_characters(split[0] + split[2]):
return f"Invalid character in filename {file_listing.name!r}"
# Check that the middle part is only hex characters.
if not _not_hex_characters.findall(split[1]):
continue
elif len(split) == 1:
if file_listing.name.lower().endswith("-symbols.txt"):
continue
# If it didn't get "continued" above, it's an unrecognized file
# pattern.
return (
"Unrecognized file pattern. Should only be <module>/<hex>/<file> "
"or <name>-symbols.txt and nothing else. "
f"(First unrecognized pattern was {file_listing.name})"
)
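# Editor's illustrative sketch (not part of the original module): names the
# listing check above accepts and rejects. All names are made up, and it is
# assumed none of them trip the settings-based snippet or key-character
# checks. Defined but never called.
def _check_file_listing_example():
    from collections import namedtuple

    FakeListing = namedtuple("FakeListing", "name")
    good = [
        FakeListing("xul.pdb/44E4EC8C2F41492B9369D6B9A059577C2/xul.sym"),
        FakeListing("firefox-symbols.txt"),
    ]
    assert check_symbols_archive_file_listing(good) is None
    bad = [FakeListing("too/deeply/nested/file.sym")]  # four path segments
    assert check_symbols_archive_file_listing(bad) is not None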
def get_bucket_info(user, try_symbols=None, preferred_bucket_name=None):
"""return an object that has 'bucket', 'endpoint_url',
'region'.
Only 'bucket' is mandatory in the response object.
"""
if try_symbols is None:
# If it wasn't explicitly passed, we need to figure this out by
# looking at the user who uploads.
# Namely, we're going to see if the user has the permission
# 'upload.upload_symbols'. If the user does, it means the user intends
# to *not* upload Try build symbols.
# This is based on the axiom that, if the upload is made with an
# API token, that API token can't have *both* the
# 'upload.upload_symbols' permission *and* the
# 'upload.upload_try_symbols' permission.
# If the user uploads via the web the user has a choice to check
# a checkbox that is off by default. If doing so, the user isn't
# using an API token, so the user might have BOTH permissions.
# Then the default falls on this NOT being a Try upload.
try_symbols = not user.has_perm("upload.upload_symbols")
if try_symbols:
url = settings.UPLOAD_TRY_SYMBOLS_URL
else:
url = settings.UPLOAD_DEFAULT_URL
exceptions = settings.UPLOAD_URL_EXCEPTIONS
if preferred_bucket_name:
# If the user has indicated a preferred bucket name, check that they have
# permission to use it.
for url, _ in get_possible_bucket_urls(user):
if preferred_bucket_name in url:
return StorageBucket(url, try_symbols=try_symbols)
raise NoPossibleBucketName(preferred_bucket_name)
else:
if user.email.lower() in exceptions:
# easy
exception = exceptions[user.email.lower()]
else:
# match against every possible wildcard
exception = None # assume no match
for email_or_wildcard in settings.UPLOAD_URL_EXCEPTIONS:
if fnmatch.fnmatch(user.email.lower(), email_or_wildcard.lower()):
# a match!
exception = settings.UPLOAD_URL_EXCEPTIONS[email_or_wildcard]
break
if exception:
url = exception
return StorageBucket(url, try_symbols=try_symbols)
def get_possible_bucket_urls(user):
"""Return list of possible buckets this user can upload to.
If the user is specified in UPLOAD_URL_EXCEPTIONS, then the user can only upload
into that bucket.
If the user is not specified, then the user can upload to the public bucket.
:param user: a django user
:return: list of tuples of (url, "private"/"public")
"""
urls = []
exceptions = settings.UPLOAD_URL_EXCEPTIONS
email_lower = user.email.lower()
for email_pattern in exceptions:
if (
email_lower == email_pattern.lower()
or fnmatch.fnmatch(email_lower, email_pattern.lower())
or user.is_superuser
):
urls.append((exceptions[email_pattern], "private"))
# We use UPLOAD_URL_EXCEPTIONS to specify buckets people can upload into. If a
# person is specified in UPLOAD_URL_EXCEPTIONS, then they can only upload to that
# bucket. If they are not specified, then they can upload to the public bucket.
if not urls:
urls.append((settings.UPLOAD_DEFAULT_URL, "public"))
return urls
def _ignore_member_file(filename):
"""Return true if the given filename (could be a filepath), should
be completely ignored in the upload process.
At the moment the list is "allow-list based", meaning all files are
processed and uploaded to S3 unless it meets certain checks.
"""
if filename.lower().endswith("-symbols.txt"):
return True
if os.path.basename(filename) in _ignorable_filenames:
return True
return False
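# Editor's illustrative sketch (not part of the original module): which member
# file names get skipped by the helper above. The names are made up. Defined
# but never called.
def _ignore_member_file_example():
    assert _ignore_member_file("firefox-SYMBOLS.txt")           # suffix, any case
    assert _ignore_member_file("xul.pdb/ABCDEF0123/.DS_Store")  # ignorable name
    assert not _ignore_member_file("xul.pdb/ABCDEF0123/xul.sym")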
@metrics.timer_decorator("upload_archive")
@api_require_POST
@csrf_exempt
@api_login_required
@api_any_permission_required("upload.upload_symbols", "upload.upload_try_symbols")
@make_tempdir(settings.UPLOAD_TEMPDIR_PREFIX)
def upload_archive(request, upload_dir):
try:
for name in request.FILES:
upload_ = request.FILES[name]
file_listing = dump_and_extract(upload_dir, upload_, name)
size = upload_.size
url = None
redirect_urls = None
break
else:
if request.POST.get("url"):
form = UploadByDownloadForm(request.POST)
try:
is_valid = form.is_valid()
except UploadByDownloadRemoteError as exception:
return http.JsonResponse({"error": str(exception)}, status=500)
if is_valid:
url = form.cleaned_data["url"]
name = form.cleaned_data["upload"]["name"]
size = form.cleaned_data["upload"]["size"]
size_fmt = filesizeformat(size)
logger.info(f"Download to upload {url} ({size_fmt})")
redirect_urls = form.cleaned_data["upload"]["redirect_urls"] or None
download_name = os.path.join(upload_dir, name)
session = session_with_retries(default_timeout=(5, 300))
with metrics.timer("upload_download_by_url"):
response_stream = session.get(url, stream=True)
# NOTE(willkg): The UploadByDownloadForm handles most errors
# when it does a HEAD, so this mostly covers transient errors
# between the HEAD and this GET request.
if response_stream.status_code != 200:
return http.JsonResponse(
{
"error": "non-200 status code when retrieving %s"
% url
},
status=400,
)
with open(download_name, "wb") as f:
# Read 1MB at a time
chunk_size = 1024 * 1024
stream = response_stream.iter_content(chunk_size=chunk_size)
count_chunks = 0
start = time.time()
for chunk in stream:
if chunk: # filter out keep-alive new chunks
f.write(chunk)
count_chunks += 1
end = time.time()
total_size = chunk_size * count_chunks
download_speed = size / (end - start)
logger.info(
f"Read {count_chunks} chunks of "
f"{filesizeformat(chunk_size)} each "
f"totalling {filesizeformat(total_size)} "
f"({filesizeformat(download_speed)}/s)."
)
file_listing = dump_and_extract(upload_dir, download_name, name)
os.remove(download_name)
else:
for key, errors in form.errors.as_data().items():
return http.JsonResponse(
{"error": errors[0].message}, status=400
)
else:
return http.JsonResponse(
{
"error": (
"Must be multipart form data with at " "least one file"
)
},
status=400,
)
except zipfile.BadZipfile as exception:
return http.JsonResponse({"error": str(exception)}, status=400)
except UnrecognizedArchiveFileExtension as exception:
return http.JsonResponse(
{"error": f'Unrecognized archive file extension "{exception}"'}, status=400
)
except DuplicateFileDifferentSize as exception:
return http.JsonResponse({"error": str(exception)}, status=400)
error = check_symbols_archive_file_listing(file_listing)
if error:
return http.JsonResponse({"error": error.strip()}, status=400)
    # If you pass an extra argument, independent of value, with the key 'try',
    # then we definitely know this is a Try symbols upload.
is_try_upload = request.POST.get("try")
# If you have special permission, you can affect which bucket to upload to.
preferred_bucket_name = request.POST.get("bucket_name")
try:
bucket_info = get_bucket_info(
request.user,
try_symbols=is_try_upload,
preferred_bucket_name=preferred_bucket_name,
)
except NoPossibleBucketName as exception:
logger.warning(f"No possible bucket for {request.user!r} ({exception})")
return http.JsonResponse({"error": "No valid bucket"}, status=403)
if is_try_upload is None:
# If 'is_try_upload' isn't immediately true by looking at the
# request.POST parameters, the get_bucket_info() function can
# figure it out too.
is_try_upload = bucket_info.try_symbols
else:
# In case it's passed in as a string
is_try_upload = bool(is_try_upload)
if not bucket_info.exists():
raise ImproperlyConfigured(f"Bucket does not exist: {bucket_info!r}")
# Create the client for upload_file_upload
# TODO(jwhitlock): implement backend details in StorageBucket API
client = bucket_info.get_storage_client(
read_timeout=settings.S3_PUT_READ_TIMEOUT,
connect_timeout=settings.S3_PUT_CONNECT_TIMEOUT,
)
# Use a different client for doing the lookups.
    # That's because we don't want the size lookups to severely accumulate
# in the case of there being some unpredictable slowness.
# When that happens the lookup is quickly cancelled and it assumes
# the file does not exist.
# See http://botocore.readthedocs.io/en/latest/reference/config.html#botocore.config.Config # noqa
lookup_client = bucket_info.get_storage_client(
read_timeout=settings.S3_LOOKUP_READ_TIMEOUT,
connect_timeout=settings.S3_LOOKUP_CONNECT_TIMEOUT,
)
    # Every key has a prefix. If the StorageBucket instance has its own prefix,
    # prepend that first :)
prefix = settings.SYMBOL_FILE_PREFIX
if bucket_info.prefix:
prefix = f"{bucket_info.prefix}/{prefix}"
# Make a hash string that represents every file listing in the archive.
# Do this by making a string first out of all files listed.
content = "\n".join(
f"{x.name}:{x.size}" for x in sorted(file_listing, key=lambda x: x.name)
)
# The MD5 is just used to make the temporary S3 file unique in name
# if the client uploads with the same filename in quick succession.
content_hash = hashlib.md5(content.encode("utf-8")).hexdigest()[:30] # nosec
# Always create the Upload object no matter what happens next.
# If all individual file uploads work out, we say this is complete.
upload_obj = Upload.objects.create(
user=request.user,
filename=name,
bucket_name=bucket_info.name,
bucket_region=bucket_info.region,
bucket_endpoint_url=bucket_info.endpoint_url,
size=size,
download_url=url,
redirect_urls=redirect_urls,
content_hash=content_hash,
try_symbols=is_try_upload,
)
ignored_keys = []
skipped_keys = []
if settings.SYNCHRONOUS_UPLOAD_FILE_UPLOAD:
# This is only applicable when running unit tests
thread_pool = SynchronousExecutor()
else:
thread_pool = concurrent.futures.ThreadPoolExecutor(
max_workers=settings.UPLOAD_FILE_UPLOAD_MAX_WORKERS or None
)
file_uploads_created = 0
uploaded_symbol_keys = []
key_to_symbol_keys = {}
with thread_pool as executor:
future_to_key = {}
for member in file_listing:
if _ignore_member_file(member.name):
ignored_keys.append(member.name)
continue
key_name = os.path.join(prefix, member.name)
# We need to know and remember, for every file attempted,
# what that name corresponds to as a "symbol key".
# A symbol key is, for example, ('xul.pdb', 'A7D6F1BBA7D6F1BB1')
symbol_key = tuple(member.name.split("/")[:2])
key_to_symbol_keys[key_name] = symbol_key
future_to_key[
executor.submit(
upload_file_upload,
client,
bucket_info.name,
key_name,
member.path,
upload=upload_obj,
client_lookup=lookup_client,
)
] = key_name
# Now lets wait for them all to finish and we'll see which ones
# were skipped and which ones were created.
for future in concurrent.futures.as_completed(future_to_key):
file_upload = future.result()
if file_upload:
file_uploads_created += 1
uploaded_symbol_keys.append(key_to_symbol_keys[file_upload.key])
else:
skipped_keys.append(future_to_key[future])
metrics.incr("upload_file_upload_skip", 1)
if file_uploads_created:
logger.info(f"Created {file_uploads_created} FileUpload objects")
else:
logger.info(f"No file uploads created for {upload_obj!r}")
Upload.objects.filter(id=upload_obj.id).update(
skipped_keys=skipped_keys or None,
ignored_keys=ignored_keys or None,
completed_at=timezone.now(),
)
# Re-calculate the UploadsCreated for today.
# FIXME(willkg): when/if we get a scheduled task runner, we should move this
# to that
date = timezone.now().date()
with metrics.timer("uploads_created_update"):
UploadsCreated.update(date)
logger.info(f"UploadsCreated updated for {date!r}")
metrics.incr(
"upload_uploads", tags=[f"try:{is_try_upload}", f"bucket:{bucket_info.name}"]
)
return http.JsonResponse({"upload": _serialize_upload(upload_obj)}, status=201)
def _serialize_upload(upload):
return {
"id": upload.id,
"size": upload.size,
"filename": upload.filename,
"bucket": upload.bucket_name,
"region": upload.bucket_region,
"download_url": upload.download_url,
"try_symbols": upload.try_symbols,
"redirect_urls": upload.redirect_urls or [],
"completed_at": upload.completed_at,
"created_at": upload.created_at,
"user": upload.user.email,
"skipped_keys": upload.skipped_keys or [],
}
|
mpl-2.0
|
4f6d74f10b6a0d8efdd103705a3ce078
| 39.862832
| 103
| 0.601354
| 4.292354
| false
| false
| false
| false
|
mozilla-services/tecken
|
tecken/api/urls.py
|
1
|
1554
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from django.urls import path
from . import views
app_name = "api"
# NOTE(peterbe): The endpoints that start with a '_' are basically only relevant for the
# sake of the frontend. Meaning, it doesn't make sense to use them in your curl script,
# for example.
urlpatterns = [
path("_auth/", views.auth, name="auth"),
path("stats/", views.stats, name="stats"),
path("stats/uploads", views.stats_uploads, name="stats_uploads"),
path("tokens/", views.tokens, name="tokens"),
path("tokens/token/<int:id>/extend", views.extend_token, name="extend_token"),
path("tokens/token/<int:id>", views.delete_token, name="delete_token"),
path(
"uploads/_possible_upload_urls/",
views.possible_upload_urls,
name="possible_upload_urls",
),
path("uploads/", views.uploads, name="uploads"),
path("uploads/created/", views.uploads_created, name="uploads_created"),
path(
"uploads/created/backfilled/",
views.uploads_created_backfilled,
name="uploads_created_backfilled",
),
path("uploads/files/", views.upload_files, name="upload_files"),
path("uploads/files/file/<int:id>", views.upload_file, name="upload_file"),
path("uploads/upload/<int:id>", views.upload, name="upload"),
path("downloads/missing/", views.downloads_missing, name="downloads_missing"),
]
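# Editor's illustrative sketch (not part of the original urlconf): the named
# routes above can be reversed through the "api" namespace; the exact URL
# prefix depends on where this module is included. Defined but never called.
def _reverse_example():
    from django.urls import reverse

    return reverse("api:upload_file", args=[123])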
|
mpl-2.0
|
00a39feb79b02fea9df5b35c8e15d0a0
| 38.846154
| 88
| 0.671171
| 3.656471
| false
| false
| false
| false
|
mozilla-services/tecken
|
bin/debug-sym-file.py
|
1
|
1332
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Prints information about a sym file including whether it kicks up
# a parse error.
# Usage: debug-sym-file.py [SYMFILE]
import os
import click
import symbolic
@click.command()
@click.argument("symfile")
@click.pass_context
def sym_file_debug(ctx, symfile):
"""Prints information about a sym file including whether it parses correctly."""
# Print size
stats = os.stat(symfile)
click.echo(f"{symfile}")
click.echo(f"size: {stats.st_size:,}")
# Print first line
with open(symfile, "r") as fp:
firstline = fp.readline().strip()
parts = firstline.split(" ")
click.echo(f"first line: {parts}")
# Parse with symbolic and create symcache
try:
click.echo("parsing with symbolic ...")
archive = symbolic.Archive.open(symfile)
click.echo("listing objects and making symcaches ...")
for obj in archive.iter_objects():
click.echo(f"* {obj.debug_id}")
obj.make_symcache()
except Exception:
click.echo("symbolic can't parse it")
raise
if __name__ == "__main__":
sym_file_debug()
|
mpl-2.0
|
d8f1fe08f944220497e222d7f64b1937
| 26.183673
| 84
| 0.647147
| 3.609756
| false
| false
| false
| false
|
pimutils/todoman
|
todoman/formatters.py
|
1
|
9862
|
import json
from datetime import date
from datetime import datetime
from datetime import timedelta
from time import mktime
from typing import Iterable
from typing import Optional
from typing import Union
import click
import humanize
import parsedatetime
import pytz
from dateutil.tz import tzlocal
from todoman.model import Todo
from todoman.model import TodoList
def rgb_to_ansi(colour: Optional[str]) -> Optional[str]:
"""
Convert a string containing an RGB colour to ANSI escapes
"""
if not colour or not colour.startswith("#"):
return None
r, g, b = colour[1:3], colour[3:5], colour[5:7]
if not len(r) == len(g) == len(b) == 2:
return None
return f"\33[38;2;{int(r, 16)!s};{int(g, 16)!s};{int(b, 16)!s}m"
class DefaultFormatter:
def __init__(
self,
date_format="%Y-%m-%d",
time_format="%H:%M",
dt_separator=" ",
tz_override=None,
):
self.date_format = date_format
self.time_format = time_format
self.dt_separator = dt_separator
self.datetime_format = dt_separator.join(
filter(bool, (date_format, time_format))
)
self.tz = tz_override or tzlocal()
self.now = datetime.now().replace(tzinfo=self.tz)
self._parsedatetime_calendar = parsedatetime.Calendar(
version=parsedatetime.VERSION_CONTEXT_STYLE,
)
def simple_action(self, action: str, todo: Todo) -> str:
return f'{action} "{todo.summary}"'
def compact(self, todo: Todo) -> str:
return self.compact_multiple([todo])
def compact_multiple(self, todos: Iterable[Todo], hide_list=False) -> str:
        # TODO: format lines fluidly and drop the table
# it can end up being more readable when too many columns are empty.
# show dates that are in the future in yellow (in 24hs) or grey (future)
table = []
for todo in todos:
completed = "X" if todo.is_completed else " "
percent = todo.percent_complete or ""
if percent:
percent = f" ({percent}%)"
if todo.categories:
categories = " [" + ", ".join(todo.categories) + "]"
else:
categories = ""
priority = click.style(
self.format_priority_compact(todo.priority),
fg="magenta",
)
due = self.format_datetime(todo.due) or "(no due date)"
now = self.now if isinstance(todo.due, datetime) else self.now.date()
due_colour = None
if todo.due:
if todo.due <= now and not todo.is_completed:
due_colour = "red"
elif todo.due >= now + timedelta(hours=24):
due_colour = "white"
elif todo.due >= now:
due_colour = "yellow"
else:
due_colour = "white"
if due_colour:
due = click.style(str(due), fg=due_colour)
recurring = "⟳" if todo.is_recurring else ""
if hide_list:
summary = "{} {}".format(
todo.summary,
percent,
)
else:
if not todo.list:
raise ValueError("Cannot format todo without a list")
summary = "{} {}{}".format(
todo.summary,
self.format_database(todo.list),
percent,
)
            # TODO: add spaces on the left based on max todos
# FIXME: double space when no priority
# split into parts to satisfy linter line too long
table.append(
f"[{completed}] {todo.id} {priority} {due} "
f"{recurring}{summary}{categories}"
)
return "\n".join(table)
def _format_multiline(self, title: str, value: str) -> str:
formatted_title = click.style(title, fg="white")
if value.strip().count("\n") == 0:
return f"\n\n{formatted_title}: {value}"
else:
return f"\n\n{formatted_title}:\n{value}"
def detailed(self, todo: Todo) -> str:
"""Returns a detailed representation of a task.
:param todo: The todo component.
"""
extra_lines = []
if todo.description:
extra_lines.append(self._format_multiline("Description", todo.description))
if todo.location:
extra_lines.append(self._format_multiline("Location", todo.location))
return f"{self.compact(todo)}{''.join(extra_lines)}"
def format_datetime(self, dt: Optional[date]) -> Union[str, int, None]:
if not dt:
return ""
elif isinstance(dt, datetime):
return dt.strftime(self.datetime_format)
elif isinstance(dt, date):
return dt.strftime(self.date_format)
def format_categories(self, categories):
return ", ".join(categories)
def parse_categories(self, categories):
# existing code assumes categories is list,
# but click passes tuple
return list(categories)
def parse_priority(self, priority: Optional[str]) -> Optional[int]:
if priority is None or priority == "":
return None
if priority == "low":
return 9
elif priority == "medium":
return 5
elif priority == "high":
return 4
elif priority == "none":
return 0
else:
raise ValueError("Priority has to be one of low, medium, high or none")
def format_priority(self, priority: Optional[int]) -> str:
if not priority:
return "none"
elif 1 <= priority <= 4:
return "high"
elif priority == 5:
return "medium"
elif 6 <= priority <= 9:
return "low"
raise ValueError("priority is an invalid value")
def format_priority_compact(self, priority: Optional[int]) -> str:
if not priority:
return ""
elif 1 <= priority <= 4:
return "!!!"
elif priority == 5:
return "!!"
elif 6 <= priority <= 9:
return "!"
raise ValueError("priority is an invalid value")
def parse_datetime(self, dt: str) -> Optional[date]:
if not dt:
return None
rv = self._parse_datetime_naive(dt)
return rv.replace(tzinfo=self.tz) if isinstance(rv, datetime) else rv
def _parse_datetime_naive(self, dt: str) -> date:
"""Parse dt and returns a naive datetime or a date"""
try:
return datetime.strptime(dt, self.datetime_format)
except ValueError:
pass
try:
return datetime.strptime(dt, self.date_format).date()
except ValueError:
pass
try:
return datetime.combine(
self.now.date(), datetime.strptime(dt, self.time_format).time()
)
except ValueError:
pass
rv, pd_ctx = self._parsedatetime_calendar.parse(dt)
if not pd_ctx.hasDateOrTime:
raise ValueError(f"Time description not recognized: {dt}")
return datetime.fromtimestamp(mktime(rv))
def format_database(self, database: TodoList):
return "{}@{}".format(
rgb_to_ansi(database.colour) or "", click.style(database.name)
)
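# Editor's illustrative sketch (not part of the original module): how the
# priority helpers above map between names and iCalendar numbers (9=low,
# 5=medium, 1-4=high). Defined but never called.
def _priority_mapping_example():
    fmt = DefaultFormatter()
    assert fmt.parse_priority("high") == 4
    assert fmt.format_priority(1) == "high"        # 1-4 all read back as "high"
    assert fmt.format_priority_compact(5) == "!!"  # medium
    assert fmt.format_priority(None) == "none"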
class HumanizedFormatter(DefaultFormatter):
def format_datetime(self, dt: Optional[date]) -> str:
if not dt:
return ""
if isinstance(dt, datetime):
rv = humanize.naturaltime(self.now - dt)
if " from now" in rv:
rv = f"in {rv[:-9]}"
elif isinstance(dt, date):
rv = humanize.naturaldate(dt)
return rv
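# Editor's illustrative sketch (not part of the original module): humanize
# phrases future datetimes as "<delta> from now"; the override above rewrites
# that to "in <delta>". The sample string is made up. Defined but never called.
def _humanized_future_example():
    sample = "3 hours from now"
    assert f"in {sample[:-9]}" == "in 3 hours"   # " from now" is 9 characters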
class PorcelainFormatter(DefaultFormatter):
def _todo_as_dict(self, todo):
return {
"completed": todo.is_completed,
"due": self.format_datetime(todo.due),
"id": todo.id,
"list": todo.list.name,
"percent": todo.percent_complete,
"summary": todo.summary,
"categories": todo.categories,
"priority": todo.priority,
"location": todo.location,
"description": todo.description,
"completed_at": self.format_datetime(todo.completed_at),
}
def compact(self, todo: Todo) -> str:
return json.dumps(self._todo_as_dict(todo), indent=4, sort_keys=True)
def compact_multiple(self, todos: Iterable[Todo], hide_list=False) -> str:
data = [self._todo_as_dict(todo) for todo in todos]
return json.dumps(data, indent=4, sort_keys=True)
def simple_action(self, action: str, todo: Todo) -> str:
return self.compact(todo)
def parse_priority(self, priority: Optional[str]) -> Optional[int]:
if priority is None:
return None
try:
if int(priority) in range(0, 10):
return int(priority)
else:
raise ValueError("Priority has to be in the range 0-9")
except ValueError as e:
raise click.BadParameter(str(e))
def detailed(self, todo: Todo) -> str:
return self.compact(todo)
def format_datetime(self, value: Optional[date]) -> Optional[int]:
if value:
if not isinstance(value, datetime):
dt = datetime.fromordinal(value.toordinal())
else:
dt = value
return int(dt.timestamp())
else:
return None
def parse_datetime(self, value):
if value:
return datetime.fromtimestamp(value, tz=pytz.UTC)
else:
return None
|
isc
|
d7abd37a0c7881a902a95755c83ae461
| 30.70418
| 87
| 0.549899
| 4.199319
| false
| false
| false
| false
|
pimutils/todoman
|
todoman/model.py
|
1
|
35300
|
from __future__ import annotations
import logging
import os
import socket
import sqlite3
from datetime import date
from datetime import datetime
from datetime import time
from datetime import timedelta
from os.path import normpath
from os.path import split
from typing import Iterable
from uuid import uuid4
import icalendar
import pytz
from atomicwrites import AtomicWriter
from dateutil.rrule import rrulestr
from dateutil.tz import tzlocal
from todoman import exceptions
logger = logging.getLogger(name=__name__)
# Initialize this only once
# We were doing this all over the place (even if unused!), so at least only do
# it once.
LOCAL_TIMEZONE = tzlocal()
class cached_property:
"""A read-only @property that is only evaluated once. Only usable on class
instances' methods.
"""
def __init__(self, fget, doc=None):
self.__name__ = fget.__name__
self.__module__ = fget.__module__
self.__doc__ = doc or fget.__doc__
self.fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
class Todo:
"""
    Represents a task/todo, and wraps around icalendar.Todo.
All text attributes are always treated as text, and "" will be returned if
they are not defined.
Date attributes are treated as datetime objects, and None will be returned
if they are not defined.
All datetime objects have tzinfo, either the one defined in the file, or
the local system's one.
"""
categories: list[str]
completed_at: datetime | None
created_at: datetime | None
due: date | None
dtstamp: datetime | None
last_modified: datetime | None
related: list[Todo]
rrule: str | None
start: date | None
def __init__(
self,
filename: str = None,
mtime: int = None,
new: bool = False,
list: TodoList = None,
):
"""
        Creates a new todo.
        :param str filename: The name of the file for this todo. Defaults to
            <uid>.ics.
        :param int mtime: The last modified time for the file backing this
            Todo.
:param bool new: Indicate that a new Todo is being created and should
be populated with default values.
:param TodoList list: The list to which this Todo belongs.
"""
self.list = list
now = datetime.now(LOCAL_TIMEZONE)
self.uid = f"{uuid4().hex}@{socket.gethostname()}"
if new:
self.created_at = now
else:
self.created_at = None
# Default values for supported fields
self.categories = []
self.completed_at = None
self.description = ""
self.dtstamp = now
self.due = None
self.id = None
self.last_modified = None
self.location = ""
self.percent_complete = 0
self.priority = 0
self.rrule = ""
self.sequence = 0
self.start = None
self.status = "NEEDS-ACTION"
self.summary = ""
self.filename = filename or f"{self.uid}.ics"
self.related = []
if os.path.basename(self.filename) != self.filename:
raise ValueError(f"Must not be an absolute path: {self.filename}")
self.mtime = mtime or datetime.now()
def clone(self) -> Todo:
"""
Returns a clone of this todo
        Returns a copy of this todo, which is almost identical, except that it
has a different UUID and filename.
"""
todo = Todo(new=True, list=self.list)
fields = (
Todo.STRING_FIELDS
+ Todo.INT_FIELDS
+ Todo.LIST_FIELDS
+ Todo.DATETIME_FIELDS
)
fields.remove("uid")
for field in fields:
setattr(todo, field, getattr(self, field))
return todo
STRING_FIELDS = [
"description",
"location",
"status",
"summary",
"uid",
"rrule",
]
INT_FIELDS = [
"percent_complete",
"priority",
"sequence",
]
LIST_FIELDS = [
"categories",
]
DATETIME_FIELDS = [
"completed_at",
"created_at",
"dtstamp",
"start",
"due",
"last_modified",
]
RRULE_FIELDS = [
"rrule",
]
ALL_SUPPORTED_FIELDS = (
DATETIME_FIELDS + INT_FIELDS + LIST_FIELDS + RRULE_FIELDS + STRING_FIELDS
)
VALID_STATUSES = (
"CANCELLED",
"COMPLETED",
"IN-PROCESS",
"NEEDS-ACTION",
)
def __setattr__(self, name: str, value):
"""Check type and avoid setting fields to None"""
"""when that is not a valid attribue."""
v = value
if name in Todo.RRULE_FIELDS:
if value is None:
v = ""
else:
assert isinstance(
value, str
), f"Got {type(value)} for {name} where str was expected"
if name in Todo.STRING_FIELDS:
if value is None:
v = ""
else:
assert isinstance(
value, str
), f"Got {type(value)} for {name} where str was expected"
if name in Todo.INT_FIELDS:
if value is None:
v = 0
else:
assert isinstance(
value, int
), f"Got {type(value)} for {name} where int was expected"
if name in Todo.LIST_FIELDS:
if value is None:
v = []
else:
assert isinstance(
value, list
), f"Got {type(value)} for {name} where list was expected"
return object.__setattr__(self, name, v)
@property
def is_completed(self) -> bool:
return bool(self.completed_at) or self.status in ("CANCELLED", "COMPLETED")
@property
def is_recurring(self) -> bool:
return bool(self.rrule)
def _apply_recurrence_to_dt(self, dt) -> datetime | None:
if not dt:
return None
recurrence = rrulestr(self.rrule, dtstart=dt)
if isinstance(dt, date) and not isinstance(dt, datetime):
dt = datetime.combine(dt, time.min)
return recurrence.after(dt)
def _create_next_instance(self):
copy = self.clone()
copy.due = self._apply_recurrence_to_dt(self.due)
copy.start = self._apply_recurrence_to_dt(self.start)
assert copy.uid != self.uid
# TODO: Push copy's alarms.
return copy
def complete(self) -> None:
"""
Immediately completes this todo
Immediately marks this todo as completed, sets the percentage to 100%
and the completed_at datetime to now.
        If this todo belongs to a series, newly created todos are added to the
``related`` list.
"""
if self.is_recurring:
related = self._create_next_instance()
if related:
self.rrule = None
self.related.append(related)
self.completed_at = datetime.now(tz=LOCAL_TIMEZONE)
self.percent_complete = 100
self.status = "COMPLETED"
@cached_property
def path(self) -> str:
if not self.list:
raise ValueError("A todo without a list does not have a path.")
return os.path.join(self.list.path, self.filename)
def cancel(self) -> None:
self.status = "CANCELLED"
class VtodoWriter:
"""Writes a Todo as a VTODO file."""
"""Maps Todo field names to VTODO field names"""
FIELD_MAP = {
"summary": "summary",
"priority": "priority",
"sequence": "sequence",
"uid": "uid",
"categories": "categories",
"completed_at": "completed",
"description": "description",
"dtstamp": "dtstamp",
"start": "dtstart",
"due": "due",
"location": "location",
"percent_complete": "percent-complete",
"priority": "priority",
"status": "status",
"created_at": "created",
"last_modified": "last-modified",
"rrule": "rrule",
}
def __init__(self, todo: Todo):
self.todo = todo
def normalize_datetime(self, dt: date) -> date:
"""
        Eliminate several differences between dates, times and datetimes
        which hinder comparison:
- Convert everything to datetime
- Add missing timezones
- Cast to UTC
Datetimes are cast to UTC because icalendar doesn't include the
VTIMEZONE information upon serialization, and some clients have issues
dealing with that.
"""
if isinstance(dt, date) and not isinstance(dt, datetime):
return dt
if not dt.tzinfo:
dt = dt.replace(tzinfo=LOCAL_TIMEZONE)
return dt.astimezone(pytz.UTC)
def serialize_field(self, name: str, value):
if name in Todo.RRULE_FIELDS:
return icalendar.vRecur.from_ical(value)
if name in Todo.DATETIME_FIELDS:
return self.normalize_datetime(value)
if name in Todo.LIST_FIELDS:
return value
if name in Todo.INT_FIELDS:
return int(value)
if name in Todo.STRING_FIELDS:
return value
raise Exception(f"Unknown field {name} serialized.")
def set_field(self, name: str, value):
        # Clear any existing value; only re-add it if the new value is truthy.
self.vtodo.pop(name)
if value:
logger.debug("Setting field %s to %s.", name, value)
self.vtodo.add(name, value)
def serialize(self, original=None):
"""Serialize a Todo into a VTODO."""
if not original:
original = icalendar.Todo()
self.vtodo = original
for source, target in self.FIELD_MAP.items():
self.vtodo.pop(target)
if getattr(self.todo, source):
self.set_field(
target,
self.serialize_field(source, getattr(self.todo, source)),
)
return self.vtodo
def _read(self, path):
with open(path, "rb") as f:
cal = f.read()
cal = icalendar.Calendar.from_ical(cal)
for component in cal.walk("VTODO"):
return component
def write(self):
if os.path.exists(self.todo.path):
self._write_existing(self.todo.path)
else:
self._write_new(self.todo.path)
return self.vtodo
def _write_existing(self, path):
original = self._read(path)
vtodo = self.serialize(original)
with open(path, "rb") as f:
cal = icalendar.Calendar.from_ical(f.read())
for index, component in enumerate(cal.subcomponents):
if component.get("uid", None) == self.todo.uid:
cal.subcomponents[index] = vtodo
with AtomicWriter(path, "wb", overwrite=True).open() as f:
f.write(cal.to_ical())
def _write_new(self, path):
vtodo = self.serialize()
c = icalendar.Calendar()
c.add_component(vtodo)
with AtomicWriter(path, "wb").open() as f:
c.add("prodid", "io.barrera.todoman")
c.add("version", "2.0")
f.write(c.to_ical())
return vtodo
class Cache:
"""
    Caches Todos for faster reads and a simpler querying interface.
The Cache class persists relevant[1] fields into an SQL database, which is
only updated if the actual file has been modified. This greatly increases
load times, but, more importantly, provides a simpler interface for
filtering/querying/sorting.
[1]: Relevant fields are those we show when listing todos, or those which
may be used for filtering/sorting.
"""
SCHEMA_VERSION = 9
def __init__(self, path: str):
self.cache_path = str(path)
os.makedirs(os.path.dirname(self.cache_path), exist_ok=True)
self._conn = sqlite3.connect(self.cache_path)
self._conn.row_factory = sqlite3.Row
self._conn.execute("PRAGMA foreign_keys = ON")
self.create_tables()
def save_to_disk(self) -> None:
self._conn.commit()
def is_latest_version(self):
"""Checks if the cache DB schema is the latest version."""
try:
return self._conn.execute(
"SELECT version FROM meta WHERE version = ?",
(Cache.SCHEMA_VERSION,),
).fetchone()
except sqlite3.OperationalError:
return False
def drop_tables(self):
self._conn.executescript(
"""
DROP TABLE IF EXISTS todos;
DROP TABLE IF EXISTS lists;
DROP TABLE IF EXISTS files;
DROP TABLE IF EXISTS categories;
"""
)
def create_tables(self):
if self.is_latest_version():
return
self.drop_tables()
self._conn.execute('CREATE TABLE IF NOT EXISTS meta ("version" INT)')
self._conn.execute(
"INSERT INTO meta (version) VALUES (?)",
(Cache.SCHEMA_VERSION,),
)
self._conn.execute(
"""
CREATE TABLE IF NOT EXISTS lists (
"name" TEXT PRIMARY KEY,
"path" TEXT,
"colour" TEXT,
"mtime" INTEGER,
CONSTRAINT path_unique UNIQUE (path)
);
"""
)
self._conn.execute(
"""
CREATE TABLE IF NOT EXISTS files (
"path" TEXT PRIMARY KEY,
"list_name" TEXT,
"mtime" INTEGER,
CONSTRAINT path_unique UNIQUE (path),
FOREIGN KEY(list_name) REFERENCES lists(name) ON DELETE CASCADE
);
"""
)
self._conn.execute(
"""
CREATE TABLE IF NOT EXISTS categories (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"todos_id" INTEGER NOT NULL,
"category" TEXT,
CONSTRAINT category_unique UNIQUE (todos_id,category),
FOREIGN KEY(todos_id) REFERENCES todos(id) ON DELETE CASCADE
);
"""
)
self._conn.execute(
"""
CREATE TABLE IF NOT EXISTS todos (
"file_path" TEXT,
"id" INTEGER PRIMARY KEY,
"uid" TEXT,
"summary" TEXT,
"due" INTEGER,
"due_dt" INTEGER,
"start" INTEGER,
"start_dt" INTEGER,
"priority" INTEGER,
"created_at" INTEGER,
"completed_at" INTEGER,
"percent_complete" INTEGER,
"dtstamp" INTEGER,
"status" TEXT,
"description" TEXT,
"location" TEXT,
"sequence" INTEGER,
"last_modified" INTEGER,
"rrule" TEXT,
FOREIGN KEY(file_path) REFERENCES files(path) ON DELETE CASCADE
);
"""
)
def clear(self):
self._conn.close()
os.remove(self.cache_path)
self._conn = None
def add_list(self, name: str, path: str, colour: str, mtime: int):
"""
Inserts a new list into the cache.
Returns the id of the newly inserted list.
"""
result = self._conn.execute(
"SELECT name FROM lists WHERE path = ?",
(path,),
).fetchone()
if result:
return result["name"]
try:
self._conn.execute(
"""
INSERT INTO lists (
name,
path,
colour,
mtime
) VALUES (?, ?, ?, ?)
""",
(
name,
path,
colour,
mtime,
),
)
except sqlite3.IntegrityError as e:
raise exceptions.AlreadyExists("list", name) from e
return self.add_list(name, path, colour, mtime)
def add_file(self, list_name: str, path: str, mtime: int):
try:
self._conn.execute(
"""
INSERT INTO files (
list_name,
path,
mtime
) VALUES (?, ?, ?);
""",
(
list_name,
path,
mtime,
),
)
except sqlite3.IntegrityError as e:
raise exceptions.AlreadyExists("file", list_name) from e
def add_category(self, todos_id, category):
try:
self._conn.execute(
"""
INSERT INTO categories (
todos_id,
category
) VALUES (?, ?);
""",
(todos_id, category),
)
except sqlite3.IntegrityError as e:
raise exceptions.AlreadyExists("category", category) from e
def _serialize_datetime(
self,
todo: icalendar.Todo,
field: str,
) -> tuple[int | None, bool | None]:
"""
        Serialize a todo field into two values: the first is the
        corresponding timestamp, the second is a boolean indicating whether
        the serialized value is a date (as opposed to a datetime).
:param icalendar.Todo todo: An icalendar component object
:param str field: The name of the field to serialize
"""
dt = todo.decoded(field, None)
if not dt:
return None, None
is_date = isinstance(dt, date) and not isinstance(dt, datetime)
if is_date:
dt = datetime(dt.year, dt.month, dt.day)
if not dt.tzinfo:
dt = dt.replace(tzinfo=LOCAL_TIMEZONE)
return dt.timestamp(), is_date
def _serialize_rrule(self, todo, field) -> str | None:
rrule = todo.get(field)
if not rrule:
return None
return rrule.to_ical().decode()
def add_vtodo(self, todo: icalendar.Todo, file_path: str, id=None) -> int:
"""
Adds a todo into the cache.
        :param icalendar.Todo todo: The icalendar component object from which
            the cached entry is built.
"""
sql = """
INSERT INTO todos (
{}
file_path,
uid,
summary,
due,
due_dt,
start,
start_dt,
priority,
created_at,
completed_at,
percent_complete,
dtstamp,
status,
description,
location,
sequence,
last_modified,
rrule
) VALUES ({}?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
?)
"""
due, due_dt = self._serialize_datetime(todo, "due")
start, start_dt = self._serialize_datetime(todo, "dtstart")
if start and due:
start = None if start >= due else start
params = [
file_path,
todo.get("uid"),
todo.get("summary"),
due,
due_dt,
start,
start_dt,
todo.get("priority", 0) or None,
self._serialize_datetime(todo, "created")[0],
self._serialize_datetime(todo, "completed")[0],
todo.get("percent-complete", None),
self._serialize_datetime(todo, "dtstamp")[0],
todo.get("status", "NEEDS-ACTION"),
todo.get("description", None),
todo.get("location", None),
todo.get("sequence", 1),
self._serialize_datetime(todo, "last-modified")[0],
self._serialize_rrule(todo, "rrule"),
]
if id:
params = [id] + params
sql = sql.format("id,\n", "?, ")
else:
sql = sql.format("", "")
cursor = self._conn.cursor()
try:
cursor.execute(sql, params)
rv = cursor.lastrowid
assert rv is not None
finally:
cursor.close()
if todo.get("categories"):
for category in todo.get("categories").cats:
self.add_category(rv, category)
return rv
def todos(
self,
lists=(),
categories=None,
priority=None,
location="",
grep="",
sort=(),
reverse=True,
due=None,
start=None,
startable=False,
status="NEEDS-ACTION,IN-PROCESS",
) -> Iterable[Todo]:
"""
Returns filtered cached todos, in a specified order.
If no order is specified, todos are sorted by the following fields::
completed_at
-priority
due
-created_at
:param list lists: Only return todos for these lists.
:param str location: Only return todos with a location containing this
string.
        :param list categories: Only return todos with any of these
            categories.
:param str grep: Filter common fields with this substring.
:param list sort: Order returned todos by these fields. Field names
with a ``-`` prepended will be used to sort in reverse order.
:param bool reverse: Reverse the order of the todos after sorting.
:param int due: Return only todos due within ``due`` hours.
:param str priority: Only return todos with priority at least as
high as specified.
:param tuple(bool, datetime) start: Return only todos before/after
``start`` date
:param list(str) status: Return only todos with any of the given
statuses.
:return: A sorted, filtered list of todos.
:rtype: generator
"""
extra_where = []
params: list = []
if "ANY" not in status:
statuses = status.split(",")
extra_where.append(
"AND (status IN ({}) OR status IS NULL)".format(
", ".join(["?"] * len(statuses))
)
)
params.extend(s.upper() for s in statuses)
if lists:
lists = [
list_.name if isinstance(list_, TodoList) else list_ for list_ in lists
]
q = ", ".join(["?"] * len(lists))
extra_where.append(f"AND files.list_name IN ({q})")
params.extend(lists)
if categories:
category_slots = ", ".join(["?"] * len(categories))
extra_where.append(
"AND upper(categories.category) IN ({category_slots})".format(
category_slots=category_slots
)
)
params = params + [category.upper() for category in categories]
if priority:
extra_where.append("AND PRIORITY > 0 AND PRIORITY <= ?")
params.append(f"{priority}")
if location:
extra_where.append("AND location LIKE ?")
params.append(f"%{location}%")
if grep:
# # requires sqlite with pcre, which won't be available everywhere:
# extra_where.append('AND summary REGEXP ?')
# params.append(grep)
extra_where.append("AND summary LIKE ?")
params.append(f"%{grep}%")
if due:
max_due = (datetime.now() + timedelta(hours=due)).timestamp()
extra_where.append("AND due IS NOT NULL AND due < ?")
params.append(max_due)
if start:
is_before, dt = start
dt = dt.timestamp()
if is_before:
extra_where.append("AND start <= ?")
params.append(dt)
else:
extra_where.append("AND start >= ?")
params.append(dt)
if startable:
extra_where.append("AND (start IS NULL OR start <= ?)")
params.append(datetime.now().timestamp())
if sort:
order_items = []
for s in sort:
if s.startswith("-"):
order_items.append(f" {s[1:]} ASC")
else:
order_items.append(f" {s} DESC")
order = ",".join(order_items)
else:
order = """
completed_at DESC,
priority IS NOT NULL, priority DESC,
due IS NOT NULL, due DESC,
created_at ASC
"""
if not reverse:
# Note the change in case to avoid swapping all of them. sqlite
# doesn't care about casing anyway.
order = order.replace(" DESC", " asc").replace(" ASC", " desc")
query = """
SELECT DISTINCT todos.*, files.list_name, files.path,
group_concat(category) AS categories
FROM todos, files
LEFT JOIN categories
ON categories.todos_id = todos.id
WHERE todos.file_path = files.path {}
GROUP BY uid
ORDER BY {}
""".format(
" ".join(extra_where),
order,
)
logger.debug(query)
logger.debug(params)
result = self._conn.execute(query, params)
seen_paths = set()
warned_paths = set()
for row in result:
todo = self._todo_from_db(row)
path = row["path"]
if path in seen_paths and path not in warned_paths:
logger.warning(
"Todo is in read-only mode because there are multiple todos in %s",
path,
)
warned_paths.add(path)
seen_paths.add(path)
yield todo
def _datetime_from_db(self, dt) -> datetime | None:
if dt:
return datetime.fromtimestamp(dt, LOCAL_TIMEZONE)
return None
def _date_from_db(self, dt, is_date=False) -> date | None:
"""Deserialise a date (possible datetime)."""
if not dt:
return dt
if is_date:
return datetime.fromtimestamp(dt, LOCAL_TIMEZONE).date()
else:
return datetime.fromtimestamp(dt, LOCAL_TIMEZONE)
def _categories_from_db(self, categories):
if categories:
return categories.split(",")
return []
def _todo_from_db(self, row: dict) -> Todo:
todo = Todo()
todo.id = row["id"]
todo.uid = row["uid"]
todo.summary = row["summary"]
todo.due = self._date_from_db(row["due"], row["due_dt"])
todo.start = self._date_from_db(row["start"], row["start_dt"])
todo.categories = self._categories_from_db(row["categories"])
todo.priority = row["priority"]
todo.created_at = self._datetime_from_db(row["created_at"])
todo.completed_at = self._datetime_from_db(row["completed_at"])
todo.dtstamp = self._datetime_from_db(row["dtstamp"])
todo.percent_complete = row["percent_complete"]
todo.status = row["status"]
todo.description = row["description"]
todo.location = row["location"]
logger.debug("todo.categories: %s\n", todo.categories)
todo.sequence = row["sequence"]
todo.last_modified = row["last_modified"]
todo.list = self.lists_map[row["list_name"]]
todo.filename = os.path.basename(row["path"])
todo.rrule = row["rrule"]
return todo
def lists(self) -> Iterable[TodoList]:
result = self._conn.execute("SELECT * FROM lists")
for row in result:
yield TodoList(
name=row["name"],
path=row["path"],
colour=row["colour"],
)
@cached_property
def lists_map(self) -> dict[str, TodoList]:
return {list_.name: list_ for list_ in self.lists()}
def expire_lists(self, paths: dict[str, int]) -> None:
results = self._conn.execute("SELECT path, name, mtime from lists")
for result in results:
if result["path"] not in paths:
self.delete_list(result["name"])
else:
mtime = paths.get(result["path"])
if mtime and mtime > result["mtime"]:
self.delete_list(result["name"])
def delete_list(self, name: str) -> None:
self._conn.execute("DELETE FROM lists WHERE lists.name = ?", (name,))
def todo(self, id: int, read_only=False) -> Todo:
# XXX: DON'T USE READ_ONLY
result = self._conn.execute(
"""
SELECT todos.*, files.list_name, files.path,
group_concat(category) AS categories
FROM todos, files
LEFT JOIN categories
ON categories.todos_id = todos.id
WHERE files.path = todos.file_path
AND todos.id = ?
GROUP BY uid
""",
(id,),
).fetchone()
if not result:
raise exceptions.NoSuchTodo(id)
if not read_only:
count = self._conn.execute(
"""
SELECT count(id) AS c
FROM files, todos
WHERE todos.file_path = files.path
AND path=?
""",
(result["path"],),
).fetchone()
if count["c"] > 1:
raise exceptions.ReadOnlyTodo(result["path"])
return self._todo_from_db(result)
def expire_files(self, paths_to_mtime: dict[str, int]) -> None:
"""Remove stale cache entries based on the given fresh data."""
result = self._conn.execute("SELECT path, mtime FROM files")
for row in result:
path, mtime = row["path"], row["mtime"]
if paths_to_mtime.get(path, None) != mtime:
self.expire_file(path)
def expire_file(self, path: str) -> None:
self._conn.execute("DELETE FROM files WHERE path = ?", (path,))
class TodoList:
def __init__(self, name: str, path: str, colour: str = None):
self.path = path
self.name = name
self.colour = colour
@staticmethod
def colour_for_path(path: str) -> str | None:
try:
with open(os.path.join(path, "color")) as f:
return f.read().strip()
except OSError:
logger.debug("No colour for list %s", path)
@staticmethod
def name_for_path(path: str) -> str:
try:
with open(os.path.join(path, "displayname")) as f:
return f.read().strip()
except OSError:
return split(normpath(path))[1]
@staticmethod
def mtime_for_path(path: str) -> int:
colour_file = os.path.join(path, "color")
display_file = os.path.join(path, "displayname")
mtimes = []
if os.path.exists(colour_file):
mtimes.append(_getmtime(colour_file))
if os.path.exists(display_file):
mtimes.append(_getmtime(display_file))
if mtimes:
return max(mtimes)
else:
return 0
def __eq__(self, other) -> bool:
if isinstance(other, TodoList):
return self.name == other.name
return object.__eq__(self, other)
def __str__(self) -> str:
return self.name
class Database:
"""
This class is essentially a wrapper around all the lists (which in turn,
contain all the todos).
    Caching is abstracted inside this class, and is transparent to outside
classes.
"""
def __init__(self, paths, cache_path):
self.cache = Cache(cache_path)
self.paths = [str(path) for path in paths]
self.update_cache()
def update_cache(self) -> None:
paths = {path: TodoList.mtime_for_path(path) for path in self.paths}
self.cache.expire_lists(paths)
paths_to_mtime = {}
paths_to_list_name = {}
for path in self.paths:
list_name = self.cache.add_list(
TodoList.name_for_path(path),
path,
TodoList.colour_for_path(path),
paths[path],
)
for entry in os.listdir(path):
if not entry.endswith(".ics"):
continue
entry_path = os.path.join(path, entry)
mtime = _getmtime(entry_path)
paths_to_mtime[entry_path] = mtime
paths_to_list_name[entry_path] = list_name
self.cache.expire_files(paths_to_mtime)
for entry_path, mtime in paths_to_mtime.items():
list_name = paths_to_list_name[entry_path]
try:
self.cache.add_file(list_name, entry_path, mtime)
except exceptions.AlreadyExists:
logger.debug("File already in cache: %s", entry_path)
continue
try:
with open(entry_path, "rb") as f:
cal = icalendar.Calendar.from_ical(f.read())
for component in cal.walk("VTODO"):
self.cache.add_vtodo(component, entry_path)
except Exception:
logger.exception("Failed to read entry %s.", entry_path)
self.cache.save_to_disk()
def todos(self, **kwargs) -> Iterable[Todo]:
return self.cache.todos(**kwargs)
def todo(self, id: int, **kwargs) -> Todo:
return self.cache.todo(id, **kwargs)
def lists(self) -> Iterable[TodoList]:
return self.cache.lists()
def move(self, todo: Todo, new_list: TodoList, from_list: TodoList) -> None:
orig_path = os.path.join(from_list.path, todo.filename)
dest_path = os.path.join(new_list.path, todo.filename)
os.rename(orig_path, dest_path)
def delete(self, todo: Todo) -> None:
if not todo.list:
raise ValueError("Cannot delete Todo without a list.")
path = os.path.join(todo.list.path, todo.filename)
os.remove(path)
def flush(self) -> Iterable[Todo]:
for todo in self.todos(status=["ANY"]):
if todo.is_completed:
yield todo
self.delete(todo)
self.cache.clear()
self.cache = None
def save(self, todo: Todo) -> None:
if not todo.list:
raise ValueError("Cannot save Todo without a list.")
for related in todo.related:
self.save(related)
todo.sequence += 1
todo.last_modified = datetime.now(LOCAL_TIMEZONE)
vtodo = VtodoWriter(todo).write()
self.cache.expire_file(todo.path)
mtime = _getmtime(todo.path)
self.cache.add_file(todo.list.name, todo.path, mtime)
todo.id = self.cache.add_vtodo(vtodo, todo.path, todo.id)
self.cache.save_to_disk()
def _getmtime(path: str) -> int:
return os.stat(path).st_mtime_ns
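# Hedged usage sketch (added; not part of the original module). The directory
# and cache paths below are placeholders; the calls only use the public
# surface defined above (lists(), todos(), TodoList.name / TodoList.colour).
#
#     db = Database(
#         paths=["/home/user/.calendars/personal"],
#         cache_path="/home/user/.cache/todos.sqlite3",
#     )
#     for todo_list in db.lists():
#         print(todo_list.name, todo_list.colour)
#     for todo in db.todos():
#         print(todo)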
|
isc
|
d6ddf7058541586aba68dc51a6483120
| 29.589255
| 87
| 0.525014
| 4.229571
| false
| false
| false
| false
|
mozilla-services/autopush
|
autopush/logging.py
|
1
|
9732
|
"""Custom Logging Setup
"""
import io
import json
import Queue
import pkg_resources
import socket
import sys
import time
import threading
from typing import Any # noqa
import boto3
import raven
from raven.transport.twisted import TwistedHTTPTransport
from raven.utils.stacks import iter_stack_frames
from twisted.internet import reactor
from twisted.logger import (
formatEvent,
formatEventAsClassicLogText,
globalLogBeginner,
globalLogPublisher,
LogLevel,
ILogObserver
)
from zope.interface import implementer
from autopush.utils import get_ec2_instance_id
# A complete set of keys we don't include in Fields from a log event
IGNORED_KEYS = frozenset([
"factory",
"failure",
"format",
"isError",
"log_failure",
"log_format",
"log_flattened",
"log_level",
"log_legacy",
"log_logger",
"log_source",
"log_system",
"log_text",
"log_time",
"log_trace",
"message",
"message_type",
"severity",
"task_level",
"time",
"timestamp",
"type",
"why",
])
# whether the global LogBeginner.beginLoggingTo has been called: it
# should only be called once
began_logging = False
# an ec2 instance id, or the hostname as a fallback
instance_id_or_hostname = None
def begin_or_register(observer, redirectStandardIO=False, **kwargs):
# type: (Any, bool, **Any) -> None
"""Register observer with the global LogPublisher
Registers via the global LogBeginner the first time called.
"""
global began_logging
if not began_logging:
globalLogBeginner.beginLoggingTo(
[observer],
redirectStandardIO=redirectStandardIO,
**kwargs
)
began_logging = True
else:
globalLogPublisher.addObserver(observer)
@implementer(ILogObserver)
class PushLogger(object):
"""Twisted LogObserver implementation
Supports firehose delivery, Raven exception reporting, and json/text
console debugging output.
"""
def __init__(self, logger_name, log_level="debug", log_format="json",
log_output="stdout", sentry_dsn=None,
firehose_delivery_stream=None):
self.logger_name = "-".join([
logger_name,
pkg_resources.get_distribution("autopush").version
])
self._filename = None
self.log_level = LogLevel.lookupByName(log_level)
if log_output == "stdout":
self._output = sys.stdout
elif log_output == "none":
self._output = None
else:
self._filename = log_output
self._output = None
if log_format == "json":
self.format_event = self.json_format
else:
self.format_event = formatEventAsClassicLogText
if sentry_dsn:
self.raven_client = raven.Client(
release=raven.fetch_package_version("autopush"),
transport=TwistedHTTPTransport,
enable_breadcrumbs=False,
)
else:
self.raven_client = None
if firehose_delivery_stream:
self.firehose = FirehoseProcessor(
stream_name=firehose_delivery_stream)
else:
self.firehose = None
def __call__(self, event):
if self.raven_client and 'log_failure' in event:
self.raven_log(event)
if event["log_level"] < self.log_level:
return
text = self.format_event(event)
if self.firehose:
self.firehose.process(text)
if self._output:
self._output.write(unicode(text))
self._output.flush()
def raven_log(self, event):
f = event["log_failure"]
stack = None
extra = dict()
tb = f.getTracebackObject()
if not tb:
# include the current stack for at least some
# context. sentry's expecting that "Frames should be
# sorted from oldest to newest."
stack = list(iter_stack_frames())[:-5] # approx.
extra = dict(no_failure_tb=True)
extra.update(
log_format=event.get('log_format'),
log_namespace=event.get('log_namespace'),
client_info=event.get('client_info'),
)
reactor.callFromThread(
self.raven_client.captureException,
exc_info=(f.type, f.value, tb),
stack=stack,
extra=extra,
)
# just in case
del tb
def json_format(self, event):
error = bool(event.get("isError")) or "log_failure" in event
ts = event["log_time"]
if error:
severity = 3
else:
severity = 5
def to_fields(kv):
reply = dict()
for k, v in kv:
if (k not in IGNORED_KEYS and
type(v) in (str, unicode, list, int, float, bool)):
reply[k] = v
return reply
msg = {
"Hostname": instance_id_or_hostname,
"Timestamp": ts * 1000 * 1000 * 1000,
"Type": "twisted:log",
"Severity": event.get("severity") or severity,
"EnvVersion": "2.0",
"Fields": to_fields(event.iteritems()),
"Logger": self.logger_name,
}
# flatten the client_info into Fields
ci = event.get('client_info')
if ci and isinstance(ci, dict):
msg['Fields'].update(
to_fields(ci.iteritems()))
# flatten timings into Fields
ti = event.get('timings')
if ti and isinstance(ti, dict):
msg["Fields"].update(
to_fields(ti.iteritems())
)
# Add the nicely formatted message
msg["Fields"]["message"] = formatEvent(event)
return json.dumps(msg, skipkeys=True) + "\n"
def start(self):
if self._filename:
self._output = io.open(self._filename, "a", encoding="utf-8")
if self.firehose:
self.firehose.start()
begin_or_register(self)
def stop(self):
globalLogPublisher.removeObserver(self)
if self._filename:
self._output.close()
self._output = None
if self.firehose:
self.firehose.stop()
@classmethod
def setup_logging(cls, logger_name, log_level="info", log_format="json",
log_output="stdout", sentry_dsn=None,
firehose_delivery_stream=None,
no_aws=False):
global instance_id_or_hostname
if not instance_id_or_hostname:
instance_id = None if no_aws else get_ec2_instance_id()
instance_id_or_hostname = instance_id or socket.getfqdn()
pl = cls(logger_name, log_level=log_level, log_format=log_format,
log_output=log_output, sentry_dsn=sentry_dsn,
firehose_delivery_stream=firehose_delivery_stream)
pl.start()
reactor.addSystemEventTrigger('before', 'shutdown', pl.stop)
return pl
class FirehoseProcessor(object):
"""Batches log events for sending to AWS FireHose"""
RECORD_SEPARATOR = u"\x1e"
MAX_RECORD_SIZE = 1024 * 1024
MAX_REQUEST_SIZE = 4 * 1024 * 1024
MAX_RECORD_BATCH = 500
MAX_INTERVAL = 30
def __init__(self, stream_name, maxsize=0):
self._records = Queue.Queue(maxsize=maxsize)
self._prepped = []
self._total_size = 0
self._thread = None
self._client = boto3.client("firehose")
self._run = False
self._stream_name = stream_name
def start(self):
self._thread = threading.Thread(target=self._worker)
self._thread.start()
def stop(self):
self._records.put_nowait(None)
self._thread.join()
self._thread = None
def process(self, record):
try:
self._records.put_nowait(record)
except Queue.Full:
# Drop extra records
pass
def _worker(self):
self._last_send = time.time()
while True:
time_since_sent = time.time() - self._last_send
remaining_wait = self.MAX_INTERVAL - time_since_sent
try:
record = self._records.get(timeout=remaining_wait)
except Queue.Empty:
# Send the records
self._send_record_batch()
continue
if record is None:
# Stop signal so we exit
break
# Is this record going to put us over our request size?
rec_size = len(record) + 1
if self._total_size + rec_size >= self.MAX_REQUEST_SIZE:
self._send_record_batch()
# Store this record
self._prepped.append(record)
self._total_size += rec_size
if len(self._prepped) >= self.MAX_RECORD_BATCH:
self._send_record_batch()
# We're done running, send any remaining
self._send_record_batch()
def _send_record_batch(self):
self._last_send = time.time()
if not self._prepped:
return
# Attempt to send the record batch up to three times, then give up
tries = 0
while tries < 3:
response = self._client.put_record_batch(
DeliveryStreamName=self._stream_name,
Records=[{"Data": bytes(self.RECORD_SEPARATOR + record)}
for record in self._prepped]
)
if response["FailedPutCount"] > 0:
tries += 1
else:
break
self._prepped = []
self._total_size = 0
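# Hedged usage sketch (added; not part of the upstream module): shows how the
# observer is typically wired up. All arguments below are placeholders;
# no_aws=True skips the EC2 instance-id lookup so this is safe to run locally.
if __name__ == "__main__":
    pl = PushLogger.setup_logging(
        "autopush",
        log_level="info",
        log_format="json",
        log_output="stdout",
        sentry_dsn=None,
        firehose_delivery_stream=None,  # e.g. a Firehose stream name to batch logs
        no_aws=True,
    )
    # twisted.logger messages emitted after this point are formatted by
    # PushLogger.json_format and written to stdout; reactor.run() would
    # normally follow in a real process.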
|
mpl-2.0
|
df1cf21c7895a731db9c97a39f8e2d54
| 28.852761
| 76
| 0.561241
| 4.070263
| false
| false
| false
| false
|
mozilla-services/autopush
|
autopush/jwt.py
|
1
|
4878
|
import base64
import binascii
import json
import os
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric import ec, utils
from cryptography.hazmat.primitives import hashes
from pyasn1.error import PyAsn1Error
from twisted.logger import Logger
from typing import Tuple # noqa
from autopush.types import JSONDict # noqa
# temporarily toggleable so it can easily be enabled in production
_JWT_MEMORY_PRESSURE = os.environ.get('_JWT_MEMORY_PRESSURE', 0)
if _JWT_MEMORY_PRESSURE != 0: # pragma: nocover
try:
from __pypy__ import add_memory_pressure
except ImportError:
_JWT_MEMORY_PRESSURE = 0
else:
try:
_JWT_MEMORY_PRESSURE = int(_JWT_MEMORY_PRESSURE)
except ValueError:
_JWT_MEMORY_PRESSURE = 2496
def repad(string):
# type: (str) -> str
"""Adds padding to strings for base64 decoding"""
if len(string) % 4:
string += '===='[len(string) % 4:]
return string
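# Hedged illustration (added; not in the upstream module): repad() only
# restores the "=" padding that URL-safe base64 values omit; already-aligned
# input is returned unchanged. The sample strings are arbitrary.
assert repad("YWJjZA") == "YWJjZA=="   # 6 chars -> needs "=="
assert repad("YWJj") == "YWJj"         # already a multiple of 4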
class VerifyJWT(object):
"""Minimally verify a Vapid JWT object.
Why hand roll? Most python JWT libraries either use a python elliptic
curve library directly, call one that does, are abandoned, or are
unsuitable for a dozen other reasons.
After spending half a day looking for reasonable replacements, I
decided to just write the functions we need directly.
THIS IS NOT A FULL JWT REPLACEMENT.
"""
@staticmethod
def extract_signature(auth):
# type: (str) -> Tuple[str, str]
"""Fix the JWT auth token.
The JWA spec defines the signature to be a pair of 32-octet encoded
longs.
The `ecdsa` library signs using a raw, concatenated pair of 32-octet
values (r, s); cryptography, which uses OpenSSL, expects a DER-encoded
sequence of (r, s). This function converts the raw ecdsa signature to DER.
:param auth: A JWT authorization token.
:type auth: str
:return tuple containing the signature material and signature
"""
payload, asig = auth.encode('utf8').rsplit(".", 1)
sig = base64.urlsafe_b64decode(repad(asig))
if len(sig) != 64:
return payload, sig
encoded = utils.encode_dss_signature(
s=int(binascii.hexlify(sig[32:]), 16),
r=int(binascii.hexlify(sig[:32]), 16)
)
return payload, encoded
@staticmethod
def extract_assertion(token):
# type: (str) -> JSONDict
"""Extract the assertion dictionary from the passed token. This does
NOT do validation.
:param token: Partial or full VAPID auth token
:type token: str
:return dict of the VAPID claims
"""
return json.loads(
base64.urlsafe_b64decode(
repad(token.split('.')[1]).encode('utf8')))
@staticmethod
def validate_and_extract_assertion(token, key):
# type: (str, str) -> JSONDict
"""Decode a web token into a assertion dictionary.
This attempts to rectify both ecdsa and openssl generated
signatures. We use the built-in cryptography library since it wraps
libssl and is faster than the python only approach.
:param token: VAPID auth token
:type token: str
:param key: bitarray containing public key
:type key: str or bitarray
:return dict of the VAPID claims
:raise InvalidSignature
"""
# convert the signature if needed.
try:
sig_material, signature = VerifyJWT.extract_signature(token)
pkey = ec.EllipticCurvePublicKey.from_encoded_point(
ec.SECP256R1(),
key
)
# cffi issue #320: public_key & verify allocate approx.
if _JWT_MEMORY_PRESSURE: # pragma: nocover
add_memory_pressure(_JWT_MEMORY_PRESSURE)
# NOTE: verify() will take any string as the signature. It appears
# to be doing lazy verification and matching strings rather than
# comparing content values. If the signatures start failing for
# some unknown reason in the future, decode the signature and
# make sure it matches how we're reconstructing it.
# This will raise an InvalidSignature exception on failure.
# It will be captured externally.
pkey.verify(
signature,
sig_material.encode('utf8'),
ec.ECDSA(hashes.SHA256()))
return VerifyJWT.extract_assertion(sig_material)
except InvalidSignature:
raise
except (ValueError, TypeError, binascii.Error, PyAsn1Error):
raise InvalidSignature()
except Exception: # pragma: no cover
Logger().failure("Unexpected error processing JWT")
raise InvalidSignature()
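# Hedged usage sketch (added; not part of the original file): the typical call
# path for a VAPID Authorization header. `token` and `raw_public_key` (the
# 65-byte uncompressed EC point from the request) are placeholders supplied by
# the caller elsewhere in autopush.
#
#     try:
#         claims = VerifyJWT.validate_and_extract_assertion(token, raw_public_key)
#     except InvalidSignature:
#         claims = None  # reject the request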
|
mpl-2.0
|
237d5e819bac273b9eccf4727a5a1386
| 32.875
| 78
| 0.625666
| 4.309187
| false
| false
| false
| false
|
mozilla-services/autopush
|
autopush/ssl.py
|
1
|
5050
|
"""Custom SSL configuration"""
from __future__ import absolute_import
import socket # noqa
import ssl
from typing import ( # noqa
Any,
Dict,
FrozenSet,
Optional,
Tuple,
)
from OpenSSL import SSL
from twisted.internet.ssl import DefaultOpenSSLContextFactory
try:
SSL_PROTO = ssl.PROTOCOL_TLS
except AttributeError: # pragma: nocover
SSL_PROTO = ssl.PROTOCOL_SSLv23
MOZILLA_INTERMEDIATE_CIPHERS = (
'ECDHE-RSA-AES128-GCM-SHA256:'
'ECDHE-ECDSA-AES128-GCM-SHA256:'
'ECDHE-RSA-AES256-GCM-SHA384:'
'ECDHE-ECDSA-AES256-GCM-SHA384:'
'DHE-RSA-AES128-GCM-SHA256:'
'DHE-DSS-AES128-GCM-SHA256:'
'ECDHE-RSA-AES128-SHA256:'
'ECDHE-ECDSA-AES128-SHA256:'
'ECDHE-RSA-AES128-SHA:'
'ECDHE-ECDSA-AES128-SHA:'
'ECDHE-RSA-AES256-SHA384:'
'ECDHE-ECDSA-AES256-SHA384:'
'ECDHE-RSA-AES256-SHA:'
'ECDHE-ECDSA-AES256-SHA:'
'DHE-RSA-AES128-SHA256:'
'DHE-RSA-AES128-SHA:'
'DHE-DSS-AES128-SHA256:'
'DHE-RSA-AES256-SHA256:'
'DHE-DSS-AES256-SHA:'
'DHE-RSA-AES256-SHA:'
'AES128-GCM-SHA256:'
'AES256-GCM-SHA384:'
'AES128-SHA256:'
'AES256-SHA256:'
'AES128-SHA:'
'AES256-SHA:'
'AES:'
'CAMELLIA:DES-CBC3-SHA:'
'!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:'
'!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'
)
class AutopushSSLContextFactory(DefaultOpenSSLContextFactory):
"""A SSL context factory"""
def __init__(self, *args, **kwargs):
self.dh_file = kwargs.pop('dh_file', None)
self.require_peer_certs = kwargs.pop('require_peer_certs', False)
DefaultOpenSSLContextFactory.__init__(self, *args, **kwargs)
def cacheContext(self):
"""Setup the main context factory with custom SSL settings"""
if self._context is None:
ctx = self._contextFactory(self.sslmethod)
ctx.set_cipher_list(MOZILLA_INTERMEDIATE_CIPHERS)
ctx.set_options(SSL.OP_CIPHER_SERVER_PREFERENCE)
ctx.set_options(SSL.OP_NO_SSLv2)
ctx.set_options(SSL.OP_NO_SSLv3)
ctx.set_options(SSL.OP_NO_COMPRESSION)
ctx.set_mode(SSL.MODE_RELEASE_BUFFERS)
ctx.set_options(SSL.OP_ALL & ~SSL.OP_MICROSOFT_BIG_SSLV3_BUFFER)
ctx.use_certificate_chain_file(self.certificateFileName)
ctx.use_privatekey_file(self.privateKeyFileName)
if self.dh_file:
ctx.load_tmp_dh(self.dh_file)
if self.require_peer_certs:
# Require peer certs but only for use by
# RequestHandlers
ctx.set_verify(
SSL.VERIFY_PEER |
SSL.VERIFY_CLIENT_ONCE,
self._allow_peer)
self._context = ctx
def _allow_peer(self, conn, cert, errno, depth, preverify_ok):
# skip verification: we only care about whitelisted signatures
# on file
return True
def monkey_patch_ssl_wrap_socket():
"""Replace ssl.wrap_socket with ssl_wrap_socket_cached"""
ssl.wrap_socket = ssl_wrap_socket_cached
def undo_monkey_patch_ssl_wrap_socket():
"""Undo monkey_patch_ssl_wrap_socket"""
ssl.wrap_socket = _orig_ssl_wrap_socket
_CacheKey = FrozenSet[Tuple[str, Any]]
_sslcontext_cache = {} # type: Dict[_CacheKey, ssl.SSLContext]
_orig_ssl_wrap_socket = ssl.wrap_socket
def ssl_wrap_socket_cached(
sock, # type: socket.socket
keyfile=None, # type: Optional[str]
certfile=None, # type: Optional[str]
server_side=False, # type: bool
cert_reqs=ssl.CERT_NONE, # type: int
ssl_version=SSL_PROTO, # type: int
ca_certs=None, # type: Optional[str]
do_handshake_on_connect=True, # type: bool
suppress_ragged_eofs=True, # type: bool
ciphers=None # type: Optional[str]
):
# type: (...) -> ssl.SSLSocket
"""ssl.wrap_socket replacement that caches SSLContexts"""
key_kwargs = (
('keyfile', keyfile),
('certfile', certfile),
('cert_reqs', cert_reqs),
('ssl_version', ssl_version),
('ca_certs', ca_certs),
('ciphers', ciphers),
)
key = frozenset(key_kwargs)
context = _sslcontext_cache.get(key)
if context is not None:
return context.wrap_socket(
sock,
server_side=server_side,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs
)
wrapped = _orig_ssl_wrap_socket(
sock,
keyfile=keyfile,
certfile=certfile,
server_side=server_side,
cert_reqs=cert_reqs,
ssl_version=ssl_version,
ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
ciphers=ciphers
)
_sslcontext_cache[key] = wrapped.context
return wrapped
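# Hedged usage sketch (added; not part of the original file): install the
# caching wrapper once at startup so repeated ssl.wrap_socket() calls with
# identical cert/key/cipher arguments reuse a single SSLContext, and undo it
# when finished (e.g. in tests).
#
#     monkey_patch_ssl_wrap_socket()
#     try:
#         pass  # code whose libraries call ssl.wrap_socket() internally
#     finally:
#         undo_monkey_patch_ssl_wrap_socket()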
|
mpl-2.0
|
5b02ead5f4ae3980a592fd56f5466c32
| 30.5625
| 76
| 0.6
| 3.344371
| false
| false
| false
| false
|
mozilla-services/autopush
|
autopush/router/apns2.py
|
1
|
7384
|
import json
from collections import deque
from decimal import Decimal
import hyper.tls
from hyper import HTTP20Connection
from hyper.http20.exceptions import HTTP20Error
from autopush.exceptions import RouterException
SANDBOX = 'api.development.push.apple.com'
SERVER = 'api.push.apple.com'
APNS_MAX_CONNECTIONS = 20
# These values are defined by APNs as header values that should be sent.
# The hyper library requires that all header values be strings.
# These values should be considered "opaque" to APNs.
# see https://developer.apple.com/search/?q=%22apns-priority%22
APNS_PRIORITY_IMMEDIATE = '10'
APNS_PRIORITY_LOW = '5'
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return int(obj.to_integral_value())
# for most data types, this function isn't called.
# the following is added for safety, but should not
# be required.
return json.JSONEncoder.default(self, obj) # pragma nocover
class APNSException(Exception):
pass
class APNSClient(object):
def __init__(self, cert_file, key_file, topic,
alt=False, use_sandbox=False,
max_connections=APNS_MAX_CONNECTIONS,
logger=None, metrics=None,
load_connections=True,
max_retry=2):
"""Create the APNS client connector.
The cert_file and key_file can be derived from the exported `.p12`
**Apple Push Services: *bundleID* ** key contained in the **Keychain
Access** application. To extract the proper PEM formatted data, you
can use the following commands:
```
openssl pkcs12 -in file.p12 -out apns_cert.pem -clcerts -nokeys
openssl pkcs12 -in file.p12 -out apns_key.pem -nocerts -nodes
```
The *topic* is the Bundle ID of the bridge recipient iOS application.
Since the cert needs to be tied directly to an application, the topic
is usually similar to "com.example.MyApplication".
:param cert_file: Path to the PEM formatted APNs certification file.
:type cert_file: str
:param key_file: Path to the PEM formatted APNs key file.
:type key_file: str
:param topic: The *Bundle ID* that identifies the assoc. iOS app.
:type topic: str
:param alt: Use the alternate APNs publication port (if 443 is blocked)
:type alt: bool
:param use_sandbox: Use the development sandbox
:type use_sandbox: bool
:param max_connections: Max number of pooled connections to use
:type max_connections: int
:param logger: Status logger
:type logger: logger
:param metrics: Metric recorder
:type metrics: autopush.metrics.IMetric
:param load_connections: used for testing
:type load_connections: bool
:param max_retry: Number of HTTP2 transmit attempts
:type max_retry: int
"""
self.server = SANDBOX if use_sandbox else SERVER
self.port = 2197 if alt else 443
self.log = logger
self.metrics = metrics
self.topic = topic
self._max_connections = max_connections
self._max_retry = max_retry
self.connections = deque(maxlen=max_connections)
if load_connections:
self.ssl_context = hyper.tls.init_context(cert=(cert_file,
key_file))
self.connections.extendleft((HTTP20Connection(
self.server,
self.port,
ssl_context=self.ssl_context,
force_proto='h2') for x in range(0, max_connections)))
if self.log:
self.log.debug("Starting APNS connection")
def send(self, router_token, payload, apns_id,
priority=True, topic=None, exp=None):
"""Send the dict of values to the remote bridge
This sends the raw data to the remote bridge application using the
APNS2 HTTP2 API.
:param router_token: APNs provided hex token identifying recipient
:type router_token: str
:param payload: Data to send to recipient
:type payload: dict
:param priority: True is high priority, false is low priority
:type priority: bool
:param topic: BundleID for the recipient application (overrides default)
:type topic: str
:param exp: Message expiration timestamp
:type exp: timestamp
"""
body = json.dumps(payload, cls=ComplexEncoder)
priority = APNS_PRIORITY_IMMEDIATE if priority else APNS_PRIORITY_LOW
# NOTE: Hyper requires that all header values be strings. 'Priority'
# is an integer string, which may be "simplified" and cause an error.
# The added str() function safeguards against that.
headers = {
'apns-id': apns_id,
'apns-priority': str(priority),
'apns-topic': topic or self.topic,
}
if exp:
headers['apns-expiration'] = str(exp)
url = '/3/device/' + router_token
attempt = 0
while True:
try:
connection = self._get_connection()
# request auto-opens closed connections, so if a connection
# has timed out or failed for other reasons, it's automatically
# re-established.
stream_id = connection.request(
'POST', url=url, body=body, headers=headers)
# get_response() may raise an AttributeError. Not really sure
# how it happens, but the connected socket may get set to None.
# We'll treat that as a premature socket closure.
response = connection.get_response(stream_id)
if response.status != 200:
reason = json.loads(
response.read().decode('utf-8'))['reason']
raise RouterException(
"APNS Transmit Error {}:{}".format(response.status,
reason),
status_code=response.status,
response_body="APNS could not process "
"your message {}".format(reason),
log_exception=True,
reason=reason
)
break
except (HTTP20Error, IOError):
connection.close()
attempt += 1
if attempt < self._max_retry:
continue
raise
finally:
# Returning a closed connection to the pool is ok.
# hyper will reconnect on .request()
self._return_connection(connection)
def _get_connection(self):
try:
connection = self.connections.pop()
return connection
except IndexError:
raise RouterException(
"Too many APNS requests, increase pool from {}".format(
self._max_connections
),
status_code=503,
response_body="APNS busy, please retry")
def _return_connection(self, connection):
self.connections.appendleft(connection)
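# Hedged usage sketch (added; not part of the original file): certificate
# paths, bundle id, device token, payload and apns-id below are placeholders.
# send() raises RouterException on a non-200 APNs response, as implemented above.
#
#     client = APNSClient(
#         cert_file="apns_cert.pem",
#         key_file="apns_key.pem",
#         topic="com.example.MyApplication",
#         use_sandbox=True,
#     )
#     client.send(
#         router_token="0f0e0d0c0b0a09080706050403020100",  # hex device token
#         payload={"aps": {"alert": "hello"}},
#         apns_id="7c18ab12-3f6a-4c5d-9e2b-000000000000",
#     )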
|
mpl-2.0
|
a22058220f1dc18d18425799194e9155
| 38.913514
| 79
| 0.585861
| 4.558025
| false
| false
| false
| false
|
unitedstates/congress
|
congress/tasks/bill_info.py
|
1
|
50773
|
from congress.tasks import utils
import logging
import re
import json
from lxml import etree
import copy
import datetime
def create_govtrack_xml(bill, options):
govtrack_type_codes = {'hr': 'h', 's': 's', 'hres': 'hr', 'sres': 'sr', 'hjres': 'hj', 'sjres': 'sj', 'hconres': 'hc', 'sconres': 'sc'}
root = etree.Element("bill")
root.set("session", bill['congress'])
root.set("type", govtrack_type_codes[bill['bill_type']])
root.set("number", bill['number'])
root.set("updated", utils.format_datetime(bill['updated_at']))
def make_node(parent, tag, text, **attrs):
if options.get("govtrack", False):
# Rewrite bioguide_id attributes as just id with GovTrack person IDs.
attrs2 = {}
for k, v in attrs.items():
if v:
if k == "bioguide_id":
# remap "bioguide_id" attributes to govtrack "id"
k = "id"
v = str(utils.translate_legislator_id('bioguide', v, 'govtrack'))
if k == "thomas_id":
# remap "thomas_id" attributes to govtrack "id"
k = "id"
v = str(utils.translate_legislator_id('thomas', v, 'govtrack'))
attrs2[k] = v
attrs = attrs2
return utils.make_node(parent, tag, text, **attrs)
# for American Memory Century of Lawmaking bills...
for source in bill.get("sources", []):
n = make_node(root, "source", "")
for k, v in sorted(source.items()):
if k == "source":
n.text = v
elif k == "source_url":
n.set("url", v)
else:
n.set(k, str(v))
if "original_bill_number" in bill:
make_node(root, "bill-number", bill["original_bill_number"])
make_node(root, "state", bill['status'], datetime=bill['status_at'])
old_status = make_node(root, "status", None)
make_node(old_status, "introduced" if bill['status'] in ("INTRODUCED", "REFERRED") else "unknown", None, datetime=bill['status_at']) # dummy for the sake of comparison
make_node(root, "introduced", None, datetime=bill['introduced_at'])
titles = make_node(root, "titles", None)
for title in bill['titles']:
n = make_node(titles, "title", title['title'])
n.set("type", title['type'])
if title['as']:
n.set("as", title['as'])
if title['is_for_portion']:
n.set("partial", "1")
def get_legislator_id_attr(p):
if "bioguide_id" in p: return { "bioguide_id": p["bioguide_id"] }
if "thomas_id" in p: return { "thomas_id": p["thomas_id"] }
return { }
if bill['sponsor']:
# TODO: Sponsored by committee?
make_node(root, "sponsor", None, **get_legislator_id_attr(bill['sponsor']))
else:
make_node(root, "sponsor", None)
cosponsors = make_node(root, "cosponsors", None)
for cosp in bill['cosponsors']:
n = make_node(cosponsors, "cosponsor", None, **get_legislator_id_attr(cosp))
if cosp["sponsored_at"]:
n.set("joined", cosp["sponsored_at"])
if cosp["withdrawn_at"]:
n.set("withdrawn", cosp["withdrawn_at"])
actions = make_node(root, "actions", None)
for action in bill['actions']:
a = make_node(actions,
action['type'] if action['type'] in ("vote", "vote-aux", "calendar", "topresident", "signed", "enacted", "vetoed") else "action",
None,
datetime=action['acted_at'])
if action.get("status"):
a.set("state", action["status"])
if action['type'] in ('vote', 'vote-aux'):
a.clear() # re-insert date between some of these attributes
a.set("how", action["how"])
a.set("type", action["vote_type"])
if action.get("roll") != None:
a.set("roll", action["roll"])
a.set("datetime", utils.format_datetime(action['acted_at']))
a.set("where", action["where"])
a.set("result", action["result"])
if action.get("suspension"):
a.set("suspension", "1")
if action.get("status"):
a.set("state", action["status"])
if action['type'] == 'calendar' and "calendar" in action:
a.set("calendar", action["calendar"])
if action["under"]:
a.set("under", action["under"])
if action["number"]:
a.set("number", action["number"])
if action['type'] == 'enacted':
a.clear() # re-insert date between some of these attributes
a.set("number", "%s-%s" % (bill['congress'], action["number"]))
a.set("type", action["law"])
a.set("datetime", utils.format_datetime(action['acted_at']))
if action.get("status"):
a.set("state", action["status"])
if action['type'] == 'vetoed':
if action.get("pocket"):
a.set("pocket", "1")
if action.get('text'):
make_node(a, "text", action['text'])
if action.get('in_committee'):
make_node(a, "committee", None, name=action['in_committee'])
for cr in action['references']:
make_node(a, "reference", None, ref=cr['reference'], label=cr['type'])
committees = make_node(root, "committees", None)
for cmt in bill['committees']:
make_node(committees, "committee", None, code=(cmt["committee_id"] + cmt["subcommittee_id"]) if cmt.get("subcommittee_id", None) else cmt["committee_id"], name=cmt["committee"], subcommittee=cmt.get("subcommittee").replace("Subcommittee on ", "") if cmt.get("subcommittee") else "", activity=", ".join(c.title() for c in cmt["activity"]))
relatedbills = make_node(root, "relatedbills", None)
for rb in bill['related_bills']:
if rb['type'] == "bill":
rb_bill_type, rb_number, rb_congress = utils.split_bill_id(rb['bill_id'])
make_node(relatedbills, "bill", None, session=rb_congress, type=govtrack_type_codes[rb_bill_type], number=rb_number, relation="unknown" if rb['reason'] == "related" else rb['reason'])
subjects = make_node(root, "subjects", None)
if bill['subjects_top_term']:
make_node(subjects, "term", None, name=bill['subjects_top_term'])
for s in bill['subjects']:
if s != bill['subjects_top_term']:
make_node(subjects, "term", None, name=s)
amendments = make_node(root, "amendments", None)
for amd in bill['amendments']:
make_node(amendments, "amendment", None, number=amd["chamber"] + str(amd["number"]))
if bill.get('summary'):
make_node(root, "summary", bill['summary']['text'], date=bill['summary']['date'], status=bill['summary']['as'])
if bill.get('committee_reports'):
committee_reports = make_node(root, "committee-reports", None)
for report in bill.get('committee_reports', []):
make_node(committee_reports, "report", report)
return etree.tostring(root, pretty_print=True)
def sponsor_for(sponsor_dict):
if sponsor_dict is None:
# TODO: This can hopefully be removed. In testing s414-113
# was missing sponsor data. But all bills have a sponsor?
return None
# TODO: Don't do regex matching here. Find another way.
m = re.match(r'(?P<title>(Rep\.|Sen\.|Del\.|Resident Commissioner)) (?P<name>.*?) +\[(?P<party>[DRIL])-(?P<state>[A-Z][A-Z])(-(?P<district>\d{1,2}|At Large|None))?\]$',
sponsor_dict['fullName'])
if not m:
raise ValueError(sponsor_dict)
return {
'title': m.group("title"),
'name': m.group("name"), # the firstName, middleName, lastName fields have inconsistent capitalization - some are all uppercase
'state': sponsor_dict["state"],
'district': sponsor_dict.get("district"), # missing for senators
#'party': m.group('party'),
'bioguide_id': sponsor_dict['bioguideId'],
'type': 'person'
}
def summary_for(summaries):
# Some bills are missing the summaries entirely?
if summaries is None:
return None
# Take the most recent summary, by looking at the lexicographically last updateDate.
summaries = summaries['item']
summary = sorted(summaries, key = lambda s: s['updateDate'])[-1]
# Build dict.
return {
"date": summary['updateDate'],
"as": summary['name'],
"text": strip_tags(summary['text']),
}
def strip_tags(text):
# Preserve paragraph breaks. Convert closing p tags (and surrounding whitespace) into two newlines. Strip trailing whitespace
text = re.sub("\s*</\s*p\s*>\s*", "\n\n", text).strip()
# naive stripping of tags, should work okay in this limited context
text = re.sub("<[^>]+>", "", text)
# compress and strip whitespace artifacts, except for the paragraph breaks
text = re.sub("[ \t\r\f\v]{2,}", " ", text).strip()
# Replace HTML entities with characters.
text = utils.unescape(text)
return text
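# Hedged example (added; not in the source): closing </p> tags become blank
# lines, remaining tags are dropped, and entities are unescaped via
# utils.unescape.
#   strip_tags("<p>First &amp; second.</p><p>Third.</p>")
#   # -> "First & second.\n\nThird."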
def committees_for(committee_list):
if committee_list is None:
return []
committee_list = committee_list['item']
activity_text_map = {
"Referred to": ["referral"],
"Hearings by": ["hearings"],
"Markup by": ["markup"],
"Reported by": ["reporting"],
"Discharged from": ["discharged"],
"Reported original measure": ["origin", "reporting"],
}
def fix_subcommittee_name(name):
return re.sub("(.*) Subcommittee$",
lambda m : "Subcommittee on " + m.group(1),
name)
def get_activitiy_list(item):
if not item['activities']:
return []
return sum([activity_text_map.get(i['name'], [i['name']]) for i in item['activities']['item']], [])
def fixup_committee_name(name):
# Preserve backwards compatibility.
if name == "House House Administration":
return "House Administration"
return name
def build_dict(item):
committee_dict = {
'activity': get_activitiy_list(item),
'committee': fixup_committee_name(item['chamber'] + ' ' + re.sub(" Committee$", "", item['name'])),
'committee_id': item['systemCode'][0:-2].upper(),
}
subcommittees_list = []
if 'subcommittees' in item and item['subcommittees'] is not None:
for subcommittee in item['subcommittees']['item']:
subcommittee_dict = copy.deepcopy(committee_dict)
subcommittee_dict.update({
'subcommittee': fix_subcommittee_name(subcommittee['name']),
'subcommittee_id': subcommittee['systemCode'][-2:],
'activity': get_activitiy_list(subcommittee),
})
subcommittees_list.append(subcommittee_dict)
return [committee_dict] + subcommittees_list
return sum([build_dict(committee) for committee in committee_list], [])
def titles_for(title_list):
def build_dict(item):
full_type = item['titleType']
is_for_portion = False
# "Official Titles as Introduced", "Short Titles on Conference report"
splits = re.split(" as | on ", full_type, 1)
if len(splits) == 2:
title_type, state = splits
if state.endswith(" for portions of this bill"):
is_for_portion = True
state = state.replace(" for portions of this bill" ,"")
state = state.replace(":", "").lower()
else:
title_type, state = full_type, None
if "Popular Title" in title_type:
title_type = "popular"
elif "Short Title" in title_type:
title_type = "short"
elif "Official Title" in title_type:
title_type = "official"
elif "Display Title" in title_type:
title_type = "display"
elif title_type == "Non-bill-report":
# TODO: What kind of title is this? Maybe assign
# a better title_type code once we know.
title_type = "nonbillreport"
else:
raise Exception("Unknown title type: " + title_type)
return {
'title': item['title'],
'is_for_portion': is_for_portion,
'as': state,
'type': title_type
}
titles = [build_dict(title) for title in title_list]
# THOMAS used to give us the titles in a particular order:
# short as introduced
# short as introduced (for portion)
# short as some later stage
# short as some later stage (for portion)
# official as introduced
# official as some later stage
# The "as" stages (introduced, etc.) were in the order in which actions
# actually occurred. This was handy because to get the current title for
# a bill, you need to know which action type was most recent. The new
# order is reverse-chronological, so we have to turn the order around
# for backwards compatibility. Rather than do a simple .reverse(), I'm
# adding an explicit sort order here which gets very close to the THOMAS
# order.
# Unfortunately this can no longer be relied on because the new bulk
# data has the "as" stages sometimes in the wrong order: The "reported to
# senate" status for House bills seems to be consistently out of place.
titles_copy = list(titles) # clone before beginning sort
def first_index_of(**kwargs):
for i, title in enumerate(titles_copy):
for k, v in kwargs.items():
k = k.replace("_", "")
if title.get(k) != v:
break
else:
# break not called --- all match
return i
titles.sort(key = lambda title: (
# keep the same 'short', 'official', 'display' order intact
first_index_of(type=title['type']),
# within each of those categories, reverse the 'as' order
-first_index_of(type=title['type'], _as=title.get('as')),
# put titles for portions last, within the type/as category
title['is_for_portion'],
# and within that, just sort alphabetically, case-insensitively (which is
# what it appears THOMAS used to do)
title['title'].lower(),
))
return titles
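# Hedged worked example (added; not in the source): with input title stages
#   short/"passed house", short/"introduced", official/"introduced"
# the sort above keeps the type groups in first-appearance order, reverses the
# "as" stages within each type, and would place any "for portion" titles after
# their whole-bill counterparts, yielding
#   short/"introduced", short/"passed house", official/"introduced"
# which approximates the old THOMAS ordering described above.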
# the most current title of a given type is the first one in the last 'as' subgroup
# of the titles for the whole bill (that is, if there's no title for the whole bill
# in the last 'as' subgroup, use the previous 'as' subgroup and so on) --- we think
# this logic matches THOMAS/Congress.gov.
def current_title_for(titles, title_type):
current_title = None
current_as = -1 # not None, cause for popular titles, None is a valid 'as'
for title in titles:
if title['type'] != title_type or title['is_for_portion'] == True:
continue
if title['as'] == current_as:
continue
# right type, new 'as', store first one
current_title = title['title']
current_as = title['as']
return current_title
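# Hedged illustration (added; not in the source): with titles in the order
# produced by titles_for() above, the current short title is the first entry
# of the last "as" subgroup of that type.
#   titles = [
#       {'type': 'short', 'as': 'introduced',   'is_for_portion': False, 'title': 'A'},
#       {'type': 'short', 'as': 'passed house', 'is_for_portion': False, 'title': 'B'},
#   ]
#   current_title_for(titles, 'short')  # -> 'B'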
def actions_for(action_list, bill_id, title):
# The bulk XML data has action history information from multiple sources. For
# major actions, the Library of Congress (code 9) action item often duplicates
# the information of a House/Senate action item. We have to skip one so that we
# don't tag multiple history items with the same parsed action info, which
# would imply the action (like a vote) occurred multiple times. THOMAS appears
# to have suppressed the Library of Congress action lines in certain cases
# to avoid duplication - they were not in our older data files.
#
# Also, there are some ghost action items with totally empty text. Remove those.
# TODO: When removed from upstream data, we can remove that check.
closure = {
"prev": None,
}
def keep_action(item, closure):
if item['text'] in (None, ""):
return False
keep = True
if closure['prev']:
if item['sourceSystem']['code'] == "9":
# Date must match the previous action.
# If both this and previous have a time, the times must match.
# The text must approximately match. Sometimes the LOC text has a prefix
# and different whitespace. And they may drop references -- so we'll
# use our action_for helper function to drop references from both
# prior to the string comparison.
if item['actionDate'] == closure["prev"]["actionDate"] \
and (item.get('actionTime') == closure["prev"].get("actionTime") or not item.get('actionTime') or not closure["prev"].get("actionTime")) \
and action_for(item)['text'].replace(" ", "").endswith(action_for(closure["prev"])['text'].replace(" ", "")):
keep = False
closure['prev'] = item
return keep
action_list = [item for item in action_list
if keep_action(item, closure)]
# Turn the actions into dicts. The actions are in reverse-chronological
# order in the bulk data XML. Process them in chronological order so that
# our bill status logic sees the actions in the right order.
def build_dict(item, closure):
action_dict = action_for(item)
extra_action_info, new_status = parse_bill_action(action_dict, closure['prev_status'], bill_id, title)
# only change/reflect status change if there was one
if new_status:
action_dict['status'] = new_status
closure['prev_status'] = new_status
# add additional parsed fields
if extra_action_info:
action_dict.update(extra_action_info)
return action_dict
closure = {
"prev_status": "INTRODUCED",
}
return [build_dict(action, closure) for action in reversed(action_list)]
# clean text, pull out the action type, any other associated metadata with an action
def action_for(item):
# acted_at
if not item.get('actionTime'):
acted_at = item.get('actionDate', '')
else:
# Although we get the action date & time in an ISO-ish format (split
# across two fields), and although we know it's in local time at the
# U.S. Capitol (i.e. U.S. Eastern), we don't know the UTC offset which
# is a part of how we used to serialize the time. So parse and then
# use pytz (via format_datetime) to re-serialize.
acted_at = utils.format_datetime(datetime.datetime.strptime(item.get('actionDate', '') + " " + item['actionTime'], "%Y-%m-%d %H:%M:%S"))
# text & references
# (amendment actions don't always have text?)
text = item['text'] if item['text'] is not None else ''
# strip out links
text = re.sub(r"</?[Aa]( \S.*?)?>", "", text)
# remove and extract references
references = []
match = re.search("\s*\(([^)]+)\)\s*$", text)
if match:
# remove the matched section
text = text[0:match.start()] + text[match.end():]
types = match.group(1)
# fix use of comma or colon instead of a semi colon between reference types
# have seen some accidental capitalization combined with accidental comma, thus the 'T'
# e.g. "text of Title VII as reported in House: CR H3075-3077, Text omission from Title VII:" (hr5384-109)
types = re.sub("[,:] ([a-zT])", r"; \1", types)
# fix "CR:"
types = re.sub("CR:", "CR", types)
# fix a missing semicolon altogether between references
# e.g. sres107-112, "consideration: CR S1877-1878 text as"
types = re.sub("(\d+) +([a-z])", r"\1; \2", types)
for reference in re.split("; ?", types):
if ": " not in reference:
type, reference = None, reference
else:
type, reference = reference.split(": ", 1)
references.append({'type': type, 'reference': reference})
# extract committee IDs
if item.get('committee'):
# Data format through Dec. 13, 2019 had only one <committee/> (though node could be empty).
committee_nodes = [item['committee']]
elif item.get('committees'):
# Starting on Dec. 13, 2019, and with a slow rollout, multiple committees could be specified.
# Thankfully our JSON output format allowed it already.
committee_nodes = item['committees'].get("item", [])
else:
# <committee/> or <committees/>, whichever was present, was empty
committee_nodes = []
# form dict
action_dict = {
'acted_at': acted_at,
'action_code': item.get('actionCode', ''),
'committees': [committee_item['systemCode'][0:-2].upper() for committee_item in committee_nodes] if committee_nodes else None, # if empty, store None
'references': references,
'type': 'action', # replaced by parse_bill_action if a regex matches
'text': text,
}
if not action_dict["committees"]:
# remove if empty - not present in how we used to generate the file
del action_dict["committees"]
# sometimes there are links (one case is for bills passed by a rule in a resolution, the link will point to the resolution)
if (item.get("links") or {}).get("link") is not None:
action_dict["links"] = item["links"]["link"]
return action_dict
def cosponsors_for(cosponsors_list):
if cosponsors_list is None:
return []
cosponsors_list = cosponsors_list['item']
def build_dict(item):
cosponsor_dict = sponsor_for(item)
del cosponsor_dict["type"] # always 'person'
cosponsor_dict.update({
'sponsored_at': item['sponsorshipDate'],
'withdrawn_at': item['sponsorshipWithdrawnDate'],
'original_cosponsor': item['isOriginalCosponsor'] == 'True'
})
return cosponsor_dict
cosponsors = [build_dict(cosponsor) for cosponsor in cosponsors_list]
# TODO: Can remove. Sort like the old THOMAS order to make diffs easier.
cosponsors.sort(key = lambda c: c['name'].lower())
return cosponsors
def related_bills_for(related_bills_list):
if related_bills_list is None:
return []
related_bills_list = related_bills_list['item']
def build_dict(item):
return {
'reason': item['relationshipDetails']['item'][0]['type'].replace('bill', '').strip().lower(),
'bill_id': '{0}{1}-{2}'.format(item['type'].replace('.', '').lower(), item['number'], item['congress']),
'type': 'bill',
'identified_by': item['relationshipDetails']['item'][0]['identifiedBy']
}
# Are these THOMAS related bill relation texts gone from the bulk data?
reasons = (
("Identical bill identified by (CRS|House|Senate)", "identical"),
("Companion bill", "identical"),
("Related bill (as )?identified by (CRS|the House Clerk's office|House committee|Senate)", "related"),
("passed in (House|Senate) in lieu of .*", "supersedes"),
("Rule related to .* in (House|Senate)", "rule"),
("This bill has text inserted from .*", "includes"),
("Text from this bill was inserted in .*", "included-in"),
("Bill related to rule .* in House", "ruled-by"),
("This bill caused other related action on .*", "caused-action"),
("Other related action happened to this bill because of .*", "action-caused-by"),
("Bill that causes .* to be laid on table in House", "caused-action"),
("Bill laid on table by virtue of .* passage in House", "action-caused-by"),
("Bill that caused the virtual passage of .* in House", "caused-action"),
("Bill passed by virtue of .* passage in House", "caused-action-by"),
("Bill on wich enrollment has been corrected by virtue of .* passage in House", "caused-action"),
)
return [build_dict(related_bill) for related_bill in related_bills_list]
# get the public or private law number from any enacted action
def slip_law_from(actions):
for action in actions:
if action["type"] == "enacted":
return {
'law_type': action["law"],
'congress': action["congress"],
'number': action["number"]
}
# find the latest status change in a set of processed actions
def latest_status(actions, introduced_at):
status, status_date = "INTRODUCED", introduced_at
for action in actions:
if action.get('status', None):
status = action['status']
status_date = action['acted_at']
return status, status_date
# look at the final set of processed actions and pull out the major historical events
def history_from_actions(actions):
history = {}
activation = activation_from(actions)
if activation:
history['active'] = True
history['active_at'] = activation['acted_at']
else:
history['active'] = False
house_vote = None
for action in actions:
if (action['type'] == 'vote') and (action['where'] == 'h') and (action['vote_type'] != "override"):
house_vote = action
if house_vote:
history['house_passage_result'] = house_vote['result']
history['house_passage_result_at'] = house_vote['acted_at']
senate_vote = None
for action in actions:
if (action['type'] == 'vote') and (action['where'] == 's') and (action['vote_type'] != "override"):
senate_vote = action
if senate_vote:
history['senate_passage_result'] = senate_vote['result']
history['senate_passage_result_at'] = senate_vote['acted_at']
senate_vote = None
for action in actions:
if (action['type'] == 'vote-aux') and (action['vote_type'] == 'cloture') and (action['where'] == 's') and (action['vote_type'] != "override"):
senate_vote = action
if senate_vote:
history['senate_cloture_result'] = senate_vote['result']
history['senate_cloture_result_at'] = senate_vote['acted_at']
vetoed = None
for action in actions:
if action['type'] == 'vetoed':
vetoed = action
if vetoed:
history['vetoed'] = True
history['vetoed_at'] = vetoed['acted_at']
else:
history['vetoed'] = False
house_override_vote = None
for action in actions:
if (action['type'] == 'vote') and (action['where'] == 'h') and (action['vote_type'] == "override"):
house_override_vote = action
if house_override_vote:
history['house_override_result'] = house_override_vote['result']
history['house_override_result_at'] = house_override_vote['acted_at']
senate_override_vote = None
for action in actions:
if (action['type'] == 'vote') and (action['where'] == 's') and (action['vote_type'] == "override"):
senate_override_vote = action
if senate_override_vote:
history['senate_override_result'] = senate_override_vote['result']
history['senate_override_result_at'] = senate_override_vote['acted_at']
enacted = None
for action in actions:
if action['type'] == 'enacted':
enacted = action
if enacted:
history['enacted'] = True
history['enacted_at'] = action['acted_at']
else:
history['enacted'] = False
topresident = None
for action in actions:
if action['type'] == 'topresident':
topresident = action
if topresident and (not history['vetoed']) and (not history['enacted']):
history['awaiting_signature'] = True
history['awaiting_signature_since'] = action['acted_at']
else:
history['awaiting_signature'] = False
return history
# find the first action beyond the standard actions every bill gets.
# - if the bill's first action is "referral", use the first action that is not
#   a referral/calendar action (the most common case for bills)
#   e.g. hr3590-111 (active), s1-113 (inactive)
# - if the bill's first action is "action", use the next action, if one is present
#   (common for resolutions)
#   e.g. sres5-113 (active), sres4-113 (inactive)
# - if the bill's first action is anything else (e.g. "vote"), use that first action
#   (bills that skip committee)
#   e.g. s227-113 (active)
def activation_from(actions):
# there's NOT always at least one :(
# as of 2013-06-10, hr2272-113 has no actions at all
if len(actions) == 0:
return None
first = actions[0]
if first['type'] in ["referral", "calendar", "action"]:
for action in actions[1:]:
if (action['type'] != "referral") and (action['type'] != "calendar") and ("Sponsor introductory remarks" not in action['text']):
return action
return None
else:
return first
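# Hedged illustration (added; not in the source): a bill whose history starts
# with a routine referral becomes "active" at its first non-referral,
# non-calendar action.
#   actions = [
#       {'type': 'referral', 'text': 'Referred to the House Committee on Ways and Means.'},
#       {'type': 'hearings', 'text': 'Hearings held.'},
#   ]
#   activation_from(actions)  # -> the 'hearings' action dict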
def parse_bill_action(action_dict, prev_status, bill_id, title):
"""Parse a THOMAS bill action line. Returns attributes to be set in the XML file on the action line."""
bill_type, number, congress = utils.split_bill_id(bill_id)
line = action_dict['text']
status = None
action = {
"type": "action"
}
# If a line starts with an amendment number, this action is on the amendment and cannot
# be parsed yet.
m = re.search(r"^(H|S)\.Amdt\.(\d+)", line, re.I)
if m != None:
# Process actions specific to amendments separately.
return None, None
# Otherwise, parse the action line for key actions.
# VOTES
# A House Vote.
line = re.sub(", the Passed", ", Passed", line) # 106 h4733 and others
m = re.search("("
+ "|".join([
"On passage",
"Passed House",
"Two-thirds of the Members present having voted in the affirmative the bill is passed,?",
"On motion to suspend the rules and pass the (?:bill|resolution)",
"On agreeing to the (?:resolution|conference report)",
"On motion to suspend the rules and agree to the (?:resolution|conference report)",
"House Agreed to Senate Amendments.*?",
"On motion (?:that )?the House (?:suspend the rules and )?(?:agree(?: with an amendment)? to|concur in) the Senate amendments?(?: to the House amendments?| to the Senate amendments?)*",
])
+ ")"
+ "(, the objections of the President to the contrary notwithstanding.?)?"
+ "(, as amended| \(Amended\))?"
+ "\.? (Passed|Failed|Agreed to|Rejected)?" # hr1625-115 has a stray period here
+ " ?(by voice vote|without objection|by (the Yeas and Nays?|Yea-Nay Vote|recorded vote)"
+ "(:? \(2/3 required\))?: (\d+ ?- ?\d+(, \d+ Present)? [ \)]*)?\((Roll no\.|Record Vote No:) \d+\))",
line, re.I)
if m != None:
motion, is_override, as_amended, pass_fail, how = m.group(1), m.group(2), m.group(3), m.group(4), m.group(5)
# print(line)
# print(m.groups())
if re.search(r"Passed House|House Agreed to", motion, re.I):
pass_fail = 'pass'
elif re.search("(ayes|yeas) had prevailed", line, re.I):
pass_fail = 'pass'
elif re.search(r"Pass|Agreed", pass_fail, re.I):
pass_fail = 'pass'
else:
pass_fail = 'fail'
if "Two-thirds of the Members present" in motion:
is_override = True
if is_override:
vote_type = "override"
elif re.search(r"(agree (with an amendment )?to|concur in) the Senate amendment", line, re.I):
vote_type = "pingpong"
elif re.search("conference report", line, re.I):
vote_type = "conference"
elif bill_type[0] == "h":
vote_type = "vote"
else:
vote_type = "vote2"
roll = None
m = re.search(r"\((Roll no\.|Record Vote No:) (\d+)\)", how, re.I)
if m != None:
how = "roll" # normalize the ugly how
roll = m.group(2)
suspension = None
if roll and "On motion to suspend the rules" in motion:
suspension = True
# alternate form of as amended, e.g. hr3979-113
if "the House agree with an amendment" in motion:
as_amended = True
action["type"] = "vote"
action["vote_type"] = vote_type
action["how"] = how
action['where'] = "h"
action['result'] = pass_fail
if roll:
action["roll"] = roll
action["suspension"] = suspension
# correct upstream data error
if bill_id == "s2012-114" and "Roll no. 250" in line: as_amended = True
if bill_id == "s2943-114" and "On passage Passed without objection" in line: as_amended = True
# get the new status of the bill after this vote
new_status = new_status_after_vote(vote_type, pass_fail == "pass", "h", bill_type, suspension, as_amended, title, prev_status)
if new_status:
status = new_status
# Passed House, not necessarily by an actual vote (think "deem")
m = re.search(r"Passed House pursuant to|House agreed to Senate amendment (with amendment )?pursuant to|Pursuant to the provisions of [HSCONJRES\. ]+ \d+, [HSCONJRES\. ]+ \d+ is considered passed House", line, re.I)
if m != None:
vote_type = "vote" if (bill_type[0] == "h") else "vote2"
if "agreed to Senate amendment" in line: vote_type = "pingpong"
pass_fail = "pass"
as_amended = ("with amendment" in line) or ("as amended" in line)
action["type"] = "vote"
action["vote_type"] = vote_type
action["how"] = "by special rule"
action["where"] = "h"
action["result"] = pass_fail
# It's always pursuant to another bill, and a bill number is given in the action line, which we parse out
# into the bill_ids field of the action. It's also represented
# structurally in the links->link elements of the original XML which we just put in "links".
# get the new status of the bill after this vote
new_status = new_status_after_vote(vote_type, pass_fail == "pass", "h", bill_type, False, as_amended, title, prev_status)
if new_status:
status = new_status
# House motions to table adversely dispose of a pending matter, if agreed to. An agreed-to "motion to table the measure",
# which is very infrequent, kills the legislation. If not agreed to, nothing changes. So this regex only captures
# agreed-to motions to table.
m = re.search("On motion to table the measure Agreed to"
+ " ?(by voice vote|without objection|by (the Yeas and Nays|Yea-Nay Vote|recorded vote)"
+ ": (\d+ - \d+(, \d+ Present)? [ \)]*)?\((Roll no\.|Record Vote No:) \d+\))",
line, re.I)
if m != None:
how = m.group(1)
pass_fail = 'fail'
# In order to classify this as resulting in the same thing as regular failed vote on passage, new_status_after_vote
# needs to know if this was a vote in the originating chamber or not.
if prev_status == "INTRODUCED" or bill_id.startswith("hres"):
vote_type = "vote"
elif False:
vote_type = "vote2"
else:
raise Exception("Need to classify %s as being in the originating chamber or not." % prev_status)
roll = None
m = re.search(r"\((Roll no\.|Record Vote No:) (\d+)\)", how, re.I)
if m != None:
how = "roll" # normalize the ugly how
roll = m.group(2)
action["type"] = "vote"
action["vote_type"] = vote_type
action["how"] = how
action['where'] = "h"
action['result'] = pass_fail
if roll:
action["roll"] = roll
# get the new status of the bill after this vote
new_status = new_status_after_vote(vote_type, pass_fail == "pass", "h", bill_type, False, False, title, prev_status)
if new_status:
status = new_status
# A Senate Vote
# (There are some annoying weird cases of double spaces which are taken care of
# at the end.)
m = re.search("("
+ "|".join([
"Passed Senate",
"Failed of passage in Senate",
"Disagreed to in Senate",
"Resolution agreed to in Senate",
"Senate (?:agreed to|concurred in) (?:the )?(?:conference report|House amendment(?: to the Senate amendments?| to the House amendments?)*)",
"Senate receded from its amendment and concurred", # hr1-115
r"Cloture \S*\s?on the motion to proceed .*?not invoked in Senate",
r"Cloture(?: motion)? on the motion to proceed to the (?:bill|measure) invoked in Senate",
"Cloture invoked in Senate",
"Cloture on (?:the motion to (?:proceed to |concur in )(?:the House amendment (?:to the Senate amendment )?to )?)(?:the bill|H.R. .*) (?:not )?invoked in Senate",
"(?:Introduced|Received|Submitted) in the Senate, (?:read twice, |considered, |read the third time, )+and (?:passed|agreed to)",
])
+ ")"
+ "(,?.*,?) "
+ "(without objection|by Unanimous Consent|by Voice Vote|(?:by )?Yea-Nay( Vote)?\. \d+\s*-\s*\d+\. Record Vote (No|Number): \d+)",
line.replace(" ", " "), re.I)
if m != None:
motion, extra, how = m.group(1), m.group(2), m.group(3)
roll = None
# put the disagreed check first, because "agreed" is contained inside it
if re.search("disagreed|not invoked", motion, re.I):
pass_fail = "fail"
elif re.search("passed|agreed|concurred|invoked", motion, re.I):
pass_fail = "pass"
else:
pass_fail = "fail"
voteaction_type = "vote"
if re.search("over veto", extra, re.I):
vote_type = "override"
elif re.search("conference report", motion, re.I):
vote_type = "conference"
elif re.search("cloture", motion, re.I):
vote_type = "cloture"
voteaction_type = "vote-aux" # because it is not a vote on passage
elif re.search("Senate agreed to (the )?House amendment|Senate concurred in (the )?House amendment", motion, re.I):
vote_type = "pingpong"
elif bill_type[0] == "s":
vote_type = "vote"
else:
vote_type = "vote2"
m = re.search(r"Record Vote (No|Number): (\d+)", how, re.I)
if m != None:
roll = m.group(2)
how = "roll"
as_amended = False
if re.search(r"with amendments|with an amendment", extra, re.I):
as_amended = True
action["type"] = voteaction_type
action["vote_type"] = vote_type
action["how"] = how
action["result"] = pass_fail
action["where"] = "s"
if roll:
action["roll"] = roll
# get the new status of the bill after this vote
new_status = new_status_after_vote(vote_type, pass_fail == "pass", "s", bill_type, False, as_amended, title, prev_status)
if new_status:
status = new_status
# OLD-STYLE VOTES (93rd Congress-ish)
m = re.search(r"Measure passed (House|Senate)(, amended(?: \(.*?\)|, with an amendment to the title)?)?(?:,? in lieu[^,]*)?(?:, roll call #(\d+) \(\d+-\d+\))?", line, re.I)
if m != None:
chamber = m.group(1)[0].lower() # 'h' or 's'
as_amended = m.group(2)
roll_num = m.group(3)
# GovTrack legacy scraper missed these: if chamber == 's' and (as_amended or roll_num or "lieu" in line): return action, status
pass_fail = "pass"
vote_type = "vote" if bill_type[0] == chamber else "vote2"
action["type"] = "vote"
action["vote_type"] = vote_type
action["how"] = "(method not recorded)" if not roll_num else "roll"
if roll_num:
action["roll"] = roll_num
action["result"] = pass_fail
action["where"] = chamber
new_status = new_status_after_vote(vote_type, pass_fail == "pass", chamber, bill_type, False, as_amended, title, prev_status)
if new_status:
status = new_status
m = re.search(r"(House|Senate) agreed to (?:House|Senate) amendments?( with an amendment)?( under Suspension of the Rules)?(?:, roll call #(\d+) \(\d+-\d+\))?\.", line, re.I)
if m != None:
chamber = m.group(1)[0].lower() # 'h' or 's'
as_amended = m.group(2)
suspension = m.group(3)
roll_num = m.group(4)
# GovTrack legacy scraper missed these: if (chamber == 'h' and not roll_num) or (chamber == 's' and roll_num): return action, status # REMOVE ME
pass_fail = "pass"
vote_type = "pingpong"
action["type"] = "vote"
action["vote_type"] = vote_type
action["how"] = "(method not recorded)" if not roll_num else "roll"
if roll_num:
action["roll"] = roll_num
action["result"] = pass_fail
action["where"] = chamber
action["suspension"] = (suspension != None)
new_status = new_status_after_vote(vote_type, pass_fail == "pass", chamber, bill_type, False, as_amended, title, prev_status)
if new_status:
status = new_status
# PSEUDO-REPORTING (because GovTrack did this, but should be changed)
# TODO: Make a new status for this as pre-reported.
m = re.search(r"Placed on (the )?([\w ]+) Calendar( under ([\w ]+))?[,\.] Calendar No\. (\d+)\.|Committee Agreed to Seek Consideration Under Suspension of the Rules|Ordered to be Reported", line, re.I)
if m != None:
# TODO: This makes no sense.
if prev_status in ("INTRODUCED", "REFERRED"):
status = "REPORTED"
action["type"] = "calendar"
# TODO: Useless. But good for GovTrack compatibility.
if m.group(2): # not 'Ordered to be Reported'
action["calendar"] = m.group(2)
action["under"] = m.group(4)
action["number"] = m.group(5)
# COMMITTEE ACTIONS
# reported
m = re.search(r"Committee on (.*)\. Reported by", line, re.I)
if m != None:
action["type"] = "reported"
action["committee"] = m.group(1)
if prev_status in ("INTRODUCED", "REFERRED"):
status = "REPORTED"
m = re.search(r"Reported to Senate from the (.*?)( \(without written report\))?\.", line, re.I)
if m != None: # 93rd Congress
action["type"] = "reported"
action["committee"] = m.group(1)
if prev_status in ("INTRODUCED", "REFERRED"):
status = "REPORTED"
# hearings held by a committee
m = re.search(r"(Committee on .*?)\. Hearings held", line, re.I)
if m != None:
action["committee"] = m.group(1)
action["type"] = "hearings"
m = re.search(r"Committee on (.*)\. Discharged (by Unanimous Consent)?", line, re.I)
if m != None:
action["committee"] = m.group(1)
action["type"] = "discharged"
if prev_status in ("INTRODUCED", "REFERRED"):
status = "REPORTED"
m = re.search("Cleared for White House|Presented to President", line, re.I)
if m != None:
action["type"] = "topresident"
m = re.search("Signed by President", line, re.I)
if m != None:
action["type"] = "signed"
status = "ENACTED:SIGNED"
m = re.search("Pocket Vetoed by President", line, re.I)
if m != None:
action["type"] = "vetoed"
action["pocket"] = "1"
status = "VETOED:POCKET"
# need to put this in an else, or this regex will match the pocket veto and override it
else:
m = re.search("Vetoed by President", line, re.I)
if m != None:
action["type"] = "vetoed"
status = "PROV_KILL:VETO"
m = re.search("Sent to Archivist of the United States unsigned", line, re.I)
if m != None:
status = "ENACTED:TENDAYRULE"
m = re.search("^(?:Became )?(Public|Private) Law(?: No:)? ([\d\-]+)\.", line, re.I)
if m != None:
action["law"] = m.group(1).lower()
pieces = m.group(2).split("-")
action["congress"] = pieces[0]
action["number"] = pieces[1]
action["type"] = "enacted"
if prev_status in ("ENACTED:SIGNED", "ENACTED:VETO_OVERRIDE", "ENACTED:TENDAYRULE"):
pass # this is a final administrative step
elif prev_status == "PROV_KILL:VETO" or prev_status.startswith("VETOED:"):
# somehow missed the override steps
status = "ENACTED:VETO_OVERRIDE"
elif bill_id in ("s2641-93", "hr1589-94", "s2527-100", "hr1677-101", "hr2978-101", "hr2126-104", "s1322-104"):
status = "ENACTED:TENDAYRULE"
else:
raise Exception("Missing Signed by President action? If this is a case of the 10-day rule, hard code the bill id %s here." % bill_id)
# Check for referral type
m = re.search(r"Referred to (?:the )?(House|Senate)?\s?(?:Committee|Subcommittee)?", line, re.I)
if m != None:
action["type"] = "referral"
if prev_status == "INTRODUCED":
status = "REFERRED"
# sweep the action line for bill IDs of related bills
bill_ids = utils.extract_bills(line, congress)
bill_ids = [b for b in bill_ids if b != bill_id]
if bill_ids:
action['bill_ids'] = bill_ids
return action, status
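# Illustrative (hypothetical) outcome of the parsing above: a Senate passage line with a recorded vote
# on a Senate-originated bill would yield roughly
#   action = {"type": "vote", "vote_type": "vote", "how": "roll", "roll": "123", "result": "pass", "where": "s"}
#   status = "PASS_OVER:SENATE"
# with the status coming from new_status_after_vote() below.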
def new_status_after_vote(vote_type, passed, chamber, bill_type, suspension, amended, title, prev_status):
if vote_type == "vote": # vote in originating chamber
if passed:
if bill_type in ("hres", "sres"):
return 'PASSED:SIMPLERES' # end of life for a simple resolution
if chamber == "h":
return 'PASS_OVER:HOUSE' # passed by originating chamber, now in second chamber
else:
return 'PASS_OVER:SENATE' # passed by originating chamber, now in second chamber
if suspension:
return 'PROV_KILL:SUSPENSIONFAILED' # provisionally killed by failure to pass under suspension of the rules
if chamber == "h":
return 'FAIL:ORIGINATING:HOUSE' # outright failure
else:
return 'FAIL:ORIGINATING:SENATE' # outright failure
if vote_type in ("vote2", "pingpong"): # vote in second chamber or subsequent pingpong votes
if passed:
if amended:
# measure is passed but not in identical form
if chamber == "h":
return 'PASS_BACK:HOUSE' # passed both chambers, but House sends it back to Senate
else:
return 'PASS_BACK:SENATE' # passed both chambers, but Senate sends it back to House
else:
# bills and joint resolutions that are not constitutional amendments, not amended from the Senate version
if bill_type in ("hjres", "sjres") and title.startswith("Proposing an amendment to the Constitution of the United States"):
return 'PASSED:CONSTAMEND' # joint resolution that looks like an amendment to the constitution
if bill_type in ("hconres", "sconres"):
return 'PASSED:CONCURRENTRES' # end of life for concurrent resolutions
return 'PASSED:BILL' # passed by second chamber, now on to president
if vote_type == "pingpong":
# chamber failed to accept the other chamber's changes, but it can vote again
return 'PROV_KILL:PINGPONGFAIL'
if suspension:
return 'PROV_KILL:SUSPENSIONFAILED' # provisionally killed by failure to pass under suspension of the rules
if chamber == "h":
return 'FAIL:SECOND:HOUSE' # outright failure
else:
return 'FAIL:SECOND:SENATE' # outright failure
if vote_type == "cloture":
if not passed:
return "PROV_KILL:CLOTUREFAILED"
else:
return None
if vote_type == "override":
if not passed:
if bill_type[0] == chamber:
if chamber == "h":
return 'VETOED:OVERRIDE_FAIL_ORIGINATING:HOUSE'
else:
return 'VETOED:OVERRIDE_FAIL_ORIGINATING:SENATE'
else:
if chamber == "h":
return 'VETOED:OVERRIDE_FAIL_SECOND:HOUSE'
else:
return 'VETOED:OVERRIDE_FAIL_SECOND:SENATE'
else:
if bill_type[0] == chamber:
if chamber == "h":
return 'VETOED:OVERRIDE_PASS_OVER:HOUSE'
else:
return 'VETOED:OVERRIDE_PASS_OVER:SENATE'
else:
# The override passed both chambers -- the veto is overridden.
return "ENACTED:VETO_OVERRIDE"
if vote_type == "conference":
# This is tricky to integrate into status because we have to wait for both
# chambers to pass the conference report.
if passed:
if prev_status.startswith("CONFERENCE:PASSED:"):
if bill_type in ("hjres", "sjres") and title.startswith("Proposing an amendment to the Constitution of the United States"):
return 'PASSED:CONSTAMEND' # joint resolution that looks like an amendment to the constitution
if bill_type in ("hconres", "sconres"):
return 'PASSED:CONCURRENTRES' # end of life for concurrent resolutions
return 'PASSED:BILL'
else:
if chamber == "h":
return 'CONFERENCE:PASSED:HOUSE'
else:
return 'CONFERENCE:PASSED:SENATE'
return None
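# Illustrative examples of the status mapping above (the title/prev stand-ins are hypothetical):
#   new_status_after_vote("vote", True, "h", "hr", False, False, title, prev)    -> "PASS_OVER:HOUSE"
#   new_status_after_vote("vote2", True, "s", "hr", False, True, title, prev)    -> "PASS_BACK:SENATE"
#   new_status_after_vote("cloture", False, "s", "s", False, False, title, prev) -> "PROV_KILL:CLOTUREFAILED"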
def amendments_for(amendment_list):
if amendment_list is None:
return []
amendment_list = amendment_list['amendment']
def build_dict(item):
# Malformed XML containing duplicate elements causes attributes to parse as a list
for attr in ['type', 'number', 'congress']:
if type(item[attr]) is list:
item[attr] = item[attr][0]
return {
'amendment_id': "{0}{1}-{2}".format(item['type'].lower(), item['number'], item['congress']),
'amendment_type': item['type'].lower(),
'chamber': item['type'][0].lower(),
'number': item['number']
}
return [build_dict(amendment) for amendment in amendment_list]
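# Illustrative input/output for build_dict above: an entry such as
#   {'type': 'SAMDT', 'number': '2', 'congress': '113'}
# would map to
#   {'amendment_id': 'samdt2-113', 'amendment_type': 'samdt', 'chamber': 's', 'number': '2'}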
def committee_reports_for(committeeReports):
ret = []
for report in (committeeReports or {}).get("committeeReport", []):
ret.append( report["citation"] )
return ret
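# Illustrative example: {"committeeReport": [{"citation": "H. Rept. 112-1"}]} -> ["H. Rept. 112-1"]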
|
cc0-1.0
|
298f2600bf22574d79701dcf5fa65269
| 40.8229
| 346
| 0.581983
| 3.558024
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/mock_helpers.py
|
1
|
1924
|
from unittest.mock import Mock
class DynamicObject:
pass
class MockFilter(object): # This is the ONE parameter constructor
def __init__(self):
self._count = 0
self._first = DynamicObject()
def first(self): # This is another method that's just coming along for the ride.
return self._first
def count(self): # This is the needed Count method
return self._count
class MockQuery(object): # This is the ONE parameter constructor
def __init__(self):
self._filter = MockFilter()
self._filter_by = MockFilter()
def filter(self, place_holder): # This is used to mimic the query.filter() call
return self._filter
def filter_by(self, **kwargs): # This is used to mimic the query.filter_by() call
return self._filter_by
class MockSession(object):
def __init__(self):
self._query = MockQuery()
self.dirty = []
def flush(self):
pass
def query(self, place_holder): # This is used to mimic the session.query call
return self._query
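# Illustrative usage of the mocks above (model/condition names are hypothetical):
#   sess = MockSession()
#   sess.query(SomeModel).filter(some_condition).count()  # -> 0 (MockFilter._count)
#   sess.query(SomeModel).filter_by(id=1).first()         # -> the MockFilter's DynamicObject instance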
def mock_response(
status=200,
content="CONTENT",
json_data=None,
raise_for_status=None,
url=None):
"""
Since we typically test a bunch of different
requests calls for a service, we end up building
a lot of mock responses, so it's usually a good idea
to have a helper function that builds them.
"""
mock_resp = Mock()
# mock raise_for_status call w/optional error
mock_resp.raise_for_status = Mock()
if raise_for_status:
mock_resp.raise_for_status.side_effect = raise_for_status
# set status code and content
mock_resp.status_code = status
mock_resp.content = content
mock_resp.url = url
# add json data if provided
if json_data:
mock_resp.json = Mock(
return_value=json_data
)
return mock_resp
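# Illustrative usage (values are hypothetical):
#   resp = mock_response(status=404, json_data={"detail": "not found"}, url="http://example.com")
#   resp.status_code  # -> 404
#   resp.json()       # -> {"detail": "not found"}
#   mock_response(raise_for_status=ValueError("boom")).raise_for_status()  # raises ValueError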
|
cc0-1.0
|
d4dfdf00d27fbc89ac3eda967c2aaef0
| 26.485714
| 89
| 0.630457
| 3.96701
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/c42d328ef2fa_add_frec_code_to_submission_model.py
|
1
|
1050
|
"""add frec_code to submission model
Revision ID: c42d328ef2fa
Revises: 4d8408c33fee
Create Date: 2017-07-10 13:16:56.855163
"""
# revision identifiers, used by Alembic.
revision = 'c42d328ef2fa'
down_revision = '4d8408c33fee'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('submission', sa.Column('frec_code', sa.Text(), nullable=True))
op.add_column('frec', sa.Column('cgac_code', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade_data_broker():
op.execute("DELETE FROM submission "
"WHERE cgac_code IS NULL")
### commands auto generated by Alembic - please adjust! ###
op.drop_column('frec', 'cgac_code')
op.drop_column('submission', 'frec_code')
### end Alembic commands ###
|
cc0-1.0
|
ca18fbce7807457ac2411d85b4d334a0
| 22.863636
| 81
| 0.671429
| 3.291536
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/4a1988f74a78_add_detached_d2_submission_models.py
|
1
|
7389
|
"""add detached D2 submission models
Revision ID: 4a1988f74a78
Revises: 4bf29ae16467
Create Date: 2017-01-20 11:40:50.782401
"""
# revision identifiers, used by Alembic.
revision = '4a1988f74a78'
down_revision = '4bf29ae16467'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('detached_award_financial_assistance',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('detached_award_financial_assistance_id', sa.Integer(), nullable=False),
sa.Column('submission_id', sa.Integer(), nullable=False),
sa.Column('job_id', sa.Integer(), nullable=False),
sa.Column('row_number', sa.Integer(), nullable=False),
sa.Column('action_date', sa.Text(), nullable=True),
sa.Column('action_type', sa.Text(), nullable=True),
sa.Column('assistance_type', sa.Text(), nullable=True),
sa.Column('award_description', sa.Text(), nullable=True),
sa.Column('awardee_or_recipient_legal', sa.Text(), nullable=True),
sa.Column('awardee_or_recipient_uniqu', sa.Text(), nullable=True),
sa.Column('awarding_agency_code', sa.Text(), nullable=True),
sa.Column('awarding_agency_name', sa.Text(), nullable=True),
sa.Column('awarding_office_code', sa.Text(), nullable=True),
sa.Column('awarding_office_name', sa.Text(), nullable=True),
sa.Column('awarding_sub_tier_agency_c', sa.Text(), nullable=True),
sa.Column('awarding_sub_tier_agency_n', sa.Text(), nullable=True),
sa.Column('award_modification_amendme', sa.Text(), nullable=True),
sa.Column('business_funds_indicator', sa.Text(), nullable=True),
sa.Column('business_types', sa.Text(), nullable=True),
sa.Column('cfda_number', sa.Text(), nullable=True),
sa.Column('cfda_title', sa.Text(), nullable=True),
sa.Column('correction_late_delete_ind', sa.Text(), nullable=True),
sa.Column('face_value_loan_guarantee', sa.Numeric(), nullable=True),
sa.Column('fain', sa.Text(), nullable=True),
sa.Column('federal_action_obligation', sa.Numeric(), nullable=True),
sa.Column('fiscal_year_and_quarter_co', sa.Text(), nullable=True),
sa.Column('funding_agency_code', sa.Text(), nullable=True),
sa.Column('funding_agency_name', sa.Text(), nullable=True),
sa.Column('funding_office_name', sa.Text(), nullable=True),
sa.Column('funding_office_code', sa.Text(), nullable=True),
sa.Column('funding_sub_tier_agency_co', sa.Text(), nullable=True),
sa.Column('funding_sub_tier_agency_na', sa.Text(), nullable=True),
sa.Column('legal_entity_address_line1', sa.Text(), nullable=True),
sa.Column('legal_entity_address_line2', sa.Text(), nullable=True),
sa.Column('legal_entity_address_line3', sa.Text(), nullable=True),
sa.Column('legal_entity_city_code', sa.Text(), nullable=True),
sa.Column('legal_entity_city_name', sa.Text(), nullable=True),
sa.Column('legal_entity_congressional', sa.Text(), nullable=True),
sa.Column('legal_entity_country_code', sa.Text(), nullable=True),
sa.Column('legal_entity_county_code', sa.Text(), nullable=True),
sa.Column('legal_entity_county_name', sa.Text(), nullable=True),
sa.Column('legal_entity_foreign_city', sa.Text(), nullable=True),
sa.Column('legal_entity_foreign_posta', sa.Text(), nullable=True),
sa.Column('legal_entity_foreign_provi', sa.Text(), nullable=True),
sa.Column('legal_entity_state_code', sa.Text(), nullable=True),
sa.Column('legal_entity_state_name', sa.Text(), nullable=True),
sa.Column('legal_entity_zip5', sa.Text(), nullable=True),
sa.Column('legal_entity_zip_last4', sa.Text(), nullable=True),
sa.Column('non_federal_funding_amount', sa.Numeric(), nullable=True),
sa.Column('original_loan_subsidy_cost', sa.Numeric(), nullable=True),
sa.Column('period_of_performance_curr', sa.Text(), nullable=True),
sa.Column('period_of_performance_star', sa.Text(), nullable=True),
sa.Column('place_of_performance_city', sa.Text(), nullable=True),
sa.Column('place_of_performance_code', sa.Text(), nullable=True),
sa.Column('place_of_performance_congr', sa.Text(), nullable=True),
sa.Column('place_of_perform_country_c', sa.Text(), nullable=True),
sa.Column('place_of_perform_county_na', sa.Text(), nullable=True),
sa.Column('place_of_performance_forei', sa.Text(), nullable=True),
sa.Column('place_of_perform_state_nam', sa.Text(), nullable=True),
sa.Column('place_of_performance_zip4a', sa.Text(), nullable=True),
sa.Column('record_type', sa.Integer(), nullable=True),
sa.Column('sai_number', sa.Text(), nullable=True),
sa.Column('total_funding_amount', sa.Numeric(), nullable=True),
sa.Column('uri', sa.Text(), nullable=True),
sa.Column('is_valid', sa.Boolean(), server_default='False', nullable=False),
sa.PrimaryKeyConstraint('detached_award_financial_assistance_id')
)
op.create_index(op.f('ix_detached_award_financial_assistance_fain'), 'detached_award_financial_assistance', ['fain'], unique=False)
op.create_index(op.f('ix_detached_award_financial_assistance_job_id'), 'detached_award_financial_assistance', ['job_id'], unique=False)
op.create_index(op.f('ix_detached_award_financial_assistance_submission_id'), 'detached_award_financial_assistance', ['submission_id'], unique=False)
op.create_index(op.f('ix_detached_award_financial_assistance_uri'), 'detached_award_financial_assistance', ['uri'], unique=False)
op.create_table('submission_sub_tier_affiliation',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('submission_sub_tier_affiliation_id', sa.Integer(), nullable=False),
sa.Column('submission_id', sa.Integer(), nullable=True),
sa.Column('sub_tier_agency_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['sub_tier_agency_id'], ['sub_tier_agency.sub_tier_agency_id'], name='fk_sub_tier_submission_affiliation_agency_id'),
sa.ForeignKeyConstraint(['submission_id'], ['submission.submission_id'], name='fk_submission_sub_tier_affiliation_id'),
sa.PrimaryKeyConstraint('submission_sub_tier_affiliation_id')
)
op.add_column('submission', sa.Column('d2_submission', sa.Boolean(), server_default='False', nullable=False))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('submission', 'd2_submission')
op.drop_table('submission_sub_tier_affiliation')
op.drop_index(op.f('ix_detached_award_financial_assistance_uri'), table_name='detached_award_financial_assistance')
op.drop_index(op.f('ix_detached_award_financial_assistance_submission_id'), table_name='detached_award_financial_assistance')
op.drop_index(op.f('ix_detached_award_financial_assistance_job_id'), table_name='detached_award_financial_assistance')
op.drop_index(op.f('ix_detached_award_financial_assistance_fain'), table_name='detached_award_financial_assistance')
op.drop_table('detached_award_financial_assistance')
### end Alembic commands ###
|
cc0-1.0
|
53b84f20fffd4fb8e83c6655c6e24e67
| 55.838462
| 153
| 0.696711
| 3.168525
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/2ae156c8f46d_update_d1_and_d2_for_daims_v1_1.py
|
1
|
4727
|
"""update d1 and d2 for daims v1.1
Revision ID: 2ae156c8f46d
Revises: 4b1ee78268fb
Create Date: 2017-08-28 15:16:00.926683
"""
# revision identifiers, used by Alembic.
revision = '2ae156c8f46d'
down_revision = '4b1ee78268fb'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('award_procurement', sa.Column('award_or_idv_flag', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('place_of_perform_country_n', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('place_of_perform_county_na', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('place_of_perform_state_nam', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('referenced_idv_agency_name', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('referenced_idv_type', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('referenced_multi_or_single', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('place_of_perform_country_n', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('place_of_perform_state_nam', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('referenced_idv_agency_name', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('referenced_multi_or_single', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('award_or_idv_flag', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('legal_entity_country_name', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('place_of_perform_country_n', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('place_of_perform_county_co', sa.Text(), nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('legal_entity_country_name', sa.Text(), nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('place_of_perform_country_n', sa.Text(), nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('place_of_perform_county_co', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('legal_entity_country_name', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('place_of_perform_country_n', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('place_of_perform_county_co', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('published_award_financial_assistance', 'place_of_perform_county_co')
op.drop_column('published_award_financial_assistance', 'place_of_perform_country_n')
op.drop_column('published_award_financial_assistance', 'legal_entity_country_name')
op.drop_column('detached_award_financial_assistance', 'place_of_perform_county_co')
op.drop_column('detached_award_financial_assistance', 'place_of_perform_country_n')
op.drop_column('detached_award_financial_assistance', 'legal_entity_country_name')
op.drop_column('award_financial_assistance', 'place_of_perform_county_co')
op.drop_column('award_financial_assistance', 'place_of_perform_country_n')
op.drop_column('award_financial_assistance', 'legal_entity_country_name')
op.drop_column('detached_award_procurement', 'referenced_multi_or_single')
op.drop_column('detached_award_procurement', 'referenced_idv_agency_name')
op.drop_column('detached_award_procurement', 'place_of_perform_state_nam')
op.drop_column('detached_award_procurement', 'place_of_perform_country_n')
op.drop_column('detached_award_procurement', 'award_or_idv_flag')
op.drop_column('award_procurement', 'referenced_multi_or_single')
op.drop_column('award_procurement', 'referenced_idv_type')
op.drop_column('award_procurement', 'referenced_idv_agency_name')
op.drop_column('award_procurement', 'place_of_perform_state_nam')
op.drop_column('award_procurement', 'place_of_perform_county_na')
op.drop_column('award_procurement', 'place_of_perform_country_n')
op.drop_column('award_procurement', 'award_or_idv_flag')
### end Alembic commands ###
|
cc0-1.0
|
5046dca887b28fe685a5997b789fe7b4
| 58.0875
| 124
| 0.721811
| 3.077474
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/4be5e411246b_adding_principle_place_street_to_.py
|
1
|
1093
|
"""Adding principle place street to subaward
Revision ID: 4be5e411246b
Revises: 87d7a9b0ea7b
Create Date: 2019-08-07 15:13:50.092991
"""
# revision identifiers, used by Alembic.
revision = '4be5e411246b'
down_revision = '87d7a9b0ea7b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('subaward', sa.Column('place_of_perform_street', sa.Text(), nullable=True))
op.add_column('subaward', sa.Column('sub_place_of_perform_street', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('subaward', 'sub_place_of_perform_street')
op.drop_column('subaward', 'place_of_perform_street')
# ### end Alembic commands ###
|
cc0-1.0
|
7d4d818bddfc23ec5a4da73ab58cdb0d
| 25.02381
| 97
| 0.696249
| 3.15896
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactbroker/routes/file_routes.py
|
1
|
14201
|
from flask import request
from webargs import fields as webargs_fields, validate as webargs_validate
from webargs.flaskparser import use_kwargs
from dataactbroker.handlers.fileHandler import (
FileHandler, get_status, list_submissions as list_submissions_handler,
list_published_files as list_published_files_handler, get_upload_file_url, get_detached_upload_file_url,
get_submission_comments, submission_report_url, update_submission_comments, list_history, file_history_url,
get_comments_file, get_submission_zip)
from dataactbroker.handlers.submission_handler import (
delete_all_submission_data, get_submission_stats, list_banners, check_current_submission_page,
publish_dabs_submission, certify_dabs_submission, publish_and_certify_dabs_submission, get_published_submission_ids,
get_submission_metadata, get_submission_data, get_revalidation_threshold, get_latest_publication_period,
revert_to_published)
from dataactbroker.decorators import convert_to_submission_id
from dataactbroker.permissions import (requires_login, requires_submission_perms, requires_agency_perms,
requires_sub_agency_perms)
from dataactcore.interfaces.function_bag import get_fabs_meta
from dataactcore.models.lookups import FILE_TYPE_DICT, FILE_TYPE_DICT_LETTER
from dataactcore.utils.jsonResponse import JsonResponse
from dataactcore.utils.requestDictionary import RequestDictionary
from dataactcore.utils.statusCode import StatusCode
# Add the file submission route
def add_file_routes(app, is_local, server_path):
""" Create routes related to file submission for flask app """
# Keys for the post route will correspond to the four types of files
@app.route("/v1/upload_dabs_files/", methods=["POST"])
@requires_agency_perms('writer')
def upload_dabs_files():
if "multipart/form-data" not in request.headers['Content-Type']:
return JsonResponse.error(ValueError("Request must be a multipart/form-data type"), StatusCode.CLIENT_ERROR)
file_manager = FileHandler(request, is_local=is_local, server_path=server_path)
return file_manager.validate_upload_dabs_files()
@app.route("/v1/check_status/", methods=["GET"])
@convert_to_submission_id
@requires_submission_perms('reader')
@use_kwargs({'type': webargs_fields.String(missing='')})
def check_status(submission, **kwargs):
type = kwargs.get('type')
return get_status(submission, type)
@app.route("/v1/submission_metadata/", methods=["GET"])
@convert_to_submission_id
@requires_submission_perms('reader')
def submission_metadata(submission):
return JsonResponse.create(StatusCode.OK, get_submission_metadata(submission))
@app.route("/v1/submission_data/", methods=["GET"])
@convert_to_submission_id
@requires_submission_perms('reader')
@use_kwargs({'type': webargs_fields.String(missing='')})
def submission_data(submission, **kwargs):
type = kwargs.get('type')
return get_submission_data(submission, type)
@app.route("/v1/revalidation_threshold/", methods=["GET"])
@requires_login
def revalidation_threshold():
return JsonResponse.create(StatusCode.OK, get_revalidation_threshold())
@app.route("/v1/latest_publication_period/", methods=["GET"])
@requires_login
def latest_publication_period():
return JsonResponse.create(StatusCode.OK, get_latest_publication_period())
@app.route("/v1/list_banners/", methods=["GET"])
@use_kwargs({'login': webargs_fields.Boolean(missing=False)})
def get_banner_list(login):
return list_banners(login)
@app.route("/v1/list_submissions/", methods=["POST"])
@requires_login
@use_kwargs({
'page': webargs_fields.Int(missing=1),
'limit': webargs_fields.Int(missing=5),
'published': webargs_fields.String(
required=True,
validate=webargs_validate.OneOf(('mixed', 'true', 'false'))),
'sort': webargs_fields.String(missing='modified'),
'order': webargs_fields.String(missing='desc'),
'fabs': webargs_fields.Bool(missing=False),
'filters': webargs_fields.Dict(keys=webargs_fields.String(), missing={})
})
def list_submissions(published, **kwargs):
""" List submission IDs associated with the current user """
page = kwargs.get('page')
limit = kwargs.get('limit')
sort = kwargs.get('sort')
order = kwargs.get('order')
fabs = kwargs.get('fabs')
filters = kwargs.get('filters')
return list_submissions_handler(page, limit, published, sort, order, fabs, filters)
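# Illustrative JSON body for this route (field names from the use_kwargs schema above; values are hypothetical):
#   {"published": "mixed", "page": 1, "limit": 5, "sort": "modified", "order": "desc", "fabs": false, "filters": {}}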
@app.route("/v1/list_latest_published_files/", methods=["GET"])
@requires_login
@use_kwargs({
'type': webargs_fields.String(
required=True,
validate=webargs_validate.OneOf(('fabs', 'dabs'))),
'agency': webargs_fields.String(),
'year': webargs_fields.Int(),
'period': webargs_fields.Int()
})
def list_latest_published_files(**kwargs):
""" List submission IDs associated with the current user """
sub_type = kwargs.get('type')
agency = kwargs.get('agency')
year = kwargs.get('year')
period = kwargs.get('period')
return list_published_files_handler(sub_type, agency, year, period)
@app.route("/v1/list_history/", methods=['GET'])
@convert_to_submission_id
@requires_submission_perms('reader')
def submission_list_history(submission):
""" List all publish and certify history for a specific submission """
return list_history(submission)
@app.route("/v1/get_certified_file/", methods=["GET"])
@use_kwargs({
'submission_id': webargs_fields.Int(required=True),
'published_files_history_id': webargs_fields.Int(required=True),
'is_warning': webargs_fields.Bool(missing=False)
})
@requires_submission_perms('reader')
def get_certified_file(submission, published_files_history_id, **kwargs):
""" Get the signed URL for the specified file history """
is_warning = kwargs.get('is_warning')
return file_history_url(published_files_history_id, is_warning, is_local, submission)
@app.route("/v1/get_submitted_published_file/", methods=["GET"])
@use_kwargs({
'published_files_history_id': webargs_fields.Int(required=True)
})
@requires_login
def get_submitted_published_file(published_files_history_id):
""" Get the signed URL for the specified submitted and published file """
return file_history_url(published_files_history_id, False, is_local)
@app.route("/v1/check_current_page/", methods=["GET"])
@convert_to_submission_id
@requires_submission_perms('reader')
def check_current_page(submission):
return check_current_submission_page(submission)
@app.route("/v1/get_fabs_meta/", methods=["GET"])
@convert_to_submission_id
@requires_submission_perms('reader')
def get_fabs_metadata(submission):
""" Return metadata of FABS submission """
return JsonResponse.create(StatusCode.OK, get_fabs_meta(submission.submission_id))
@app.route("/v1/upload_fabs_file/", methods=["POST"])
@requires_sub_agency_perms('editfabs')
def upload_fabs_file():
if "multipart/form-data" not in request.headers['Content-Type']:
return JsonResponse.error(ValueError("Request must be a multipart/form-data type"), StatusCode.CLIENT_ERROR)
params = RequestDictionary.derive(request)
fabs = params.get('_files', {}).get('fabs', None)
file_manager = FileHandler(request, is_local=is_local, server_path=server_path)
return file_manager.upload_fabs_file(fabs)
@app.route("/v1/publish_fabs_file/", methods=["POST"])
@convert_to_submission_id
@requires_submission_perms('fabs', check_owner=False)
def publish_fabs_file(submission):
file_manager = FileHandler(request, is_local=is_local, server_path=server_path)
return file_manager.publish_fabs_submission(submission)
@app.route("/v1/get_obligations/", methods=["GET"])
@convert_to_submission_id
@requires_submission_perms('reader')
def get_obligations(submission):
return JsonResponse.create(StatusCode.OK, get_submission_stats(submission.submission_id))
@app.route("/v1/get_submission_comments/", methods=['GET'])
@convert_to_submission_id
@requires_submission_perms('reader')
def get_sub_comments(submission):
return get_submission_comments(submission)
@app.route("/v1/update_submission_comments/", methods=['POST'])
@convert_to_submission_id
@requires_submission_perms('writer')
def update_sub_comments(submission):
return update_submission_comments(submission, request.json, is_local)
@app.route("/v1/get_comments_file/", methods=['GET'])
@convert_to_submission_id
@requires_submission_perms('reader')
def get_submission_comments_file(submission):
return get_comments_file(submission, is_local)
@app.route("/v1/get_submission_zip/", methods=['GET'])
@convert_to_submission_id
@requires_submission_perms('reader')
@use_kwargs({
'publish_history_id': webargs_fields.Int(),
'certify_history_id': webargs_fields.Int()
})
def get_sub_zip(submission, **kwargs):
publish_history_id = kwargs.get('publish_history_id')
certify_history_id = kwargs.get('certify_history_id')
return get_submission_zip(submission, publish_history_id, certify_history_id, is_local)
@app.route("/v1/report_url/", methods=['GET'])
@convert_to_submission_id
@requires_submission_perms('reader')
@use_kwargs({
'file_type': webargs_fields.String(
required=True,
validate=webargs_validate.OneOf(FILE_TYPE_DICT.keys() - {'executive_compensation', 'sub_award'})
),
'warning': webargs_fields.Bool(),
'cross_type': webargs_fields.String(validate=webargs_validate.OneOf(['program_activity', 'award_financial',
'award_procurement', 'award']))
})
def post_submission_report_url(submission, file_type, **kwargs):
warning = kwargs.get('warning')
cross_type = kwargs.get('cross_type')
return submission_report_url(submission, bool(warning), file_type, cross_type)
@app.route("/v1/get_file_url/", methods=['GET'])
@convert_to_submission_id
@requires_submission_perms('reader')
@use_kwargs({
'file_type': webargs_fields.String(
required=True,
validate=webargs_validate.OneOf(FILE_TYPE_DICT_LETTER.values())
)
})
def get_file_url(submission, file_type):
return get_upload_file_url(submission, file_type)
@app.route("/v1/get_detached_file_url/", methods=['GET'])
@requires_login
@use_kwargs({
'job_id': webargs_fields.Int(required=True)
})
def get_detached_file_url(job_id):
return get_detached_upload_file_url(job_id)
@app.route("/v1/delete_submission/", methods=['POST'])
@convert_to_submission_id
@requires_submission_perms('writer', check_fabs='editfabs')
def delete_submission(submission):
""" Deletes all data associated with the specified submission
NOTE: THERE IS NO WAY TO UNDO THIS
"""
return delete_all_submission_data(submission)
@app.route("/v1/published_submissions/", methods=["GET"])
@requires_login
@use_kwargs({'reporting_fiscal_year': webargs_fields.String(required=True),
'reporting_fiscal_period': webargs_fields.String(required=True),
'cgac_code': webargs_fields.String(),
'frec_code': webargs_fields.String(),
'is_quarter': webargs_fields.Bool()})
def get_published_submissions(reporting_fiscal_year, reporting_fiscal_period, **kwargs):
""" Check if cgac (or frec) code, year, and quarter already has a published submission """
cgac_code = kwargs.get('cgac_code')
frec_code = kwargs.get('frec_code')
is_quarter = kwargs.get('is_quarter')
return get_published_submission_ids(cgac_code, frec_code, reporting_fiscal_year, reporting_fiscal_period,
is_quarter)
@app.route('/v1/publish_dabs_submission/', methods=['POST'])
@convert_to_submission_id
@requires_submission_perms('submitter', check_owner=False)
def publish_dabs_sub(submission):
file_manager = FileHandler(request, is_local=is_local, server_path=server_path)
return publish_dabs_submission(submission, file_manager)
@app.route("/v1/certify_dabs_submission/", methods=['POST'])
@convert_to_submission_id
@requires_submission_perms('submitter', check_owner=False)
def certify_dabs_sub(submission):
return certify_dabs_submission(submission)
@app.route('/v1/publish_and_certify_dabs_submission/', methods=['POST'])
@convert_to_submission_id
@requires_submission_perms('submitter', check_owner=False)
def publish_and_certify_dabs_sub(submission):
file_manager = FileHandler(request, is_local=is_local, server_path=server_path)
return publish_and_certify_dabs_submission(submission, file_manager)
@app.route("/v1/restart_validation/", methods=['POST'])
@convert_to_submission_id
@requires_submission_perms('writer', check_fabs='editfabs')
@use_kwargs({'is_fabs': webargs_fields.Bool(missing=False)})
def restart_validation(submission, **kwargs):
is_fabs = kwargs.get('is_fabs')
return FileHandler.restart_validation(submission, is_fabs)
@app.route("/v1/revert_submission/", methods=['POST'])
@convert_to_submission_id
@requires_submission_perms('submitter')
def revert_submission(submission):
""" Revert an updated DABS submission to the state it was when it was last published """
file_manager = FileHandler(request, is_local=is_local, server_path=server_path)
return revert_to_published(submission, file_manager)
|
cc0-1.0
|
c3dcf5ecda1eb8bff5efd4c6511c1a3e
| 45.408497
| 120
| 0.67108
| 3.746966
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/3b9dffe063a6_create_zips_grouped_table_and_add_.py
|
1
|
2745
|
"""Create zips_grouped table and add multicolumn index to zips table
Revision ID: 3b9dffe063a6
Revises: be4dcb9eede6
Create Date: 2020-08-03 11:48:55.068356
"""
# revision identifiers, used by Alembic.
revision = '3b9dffe063a6'
down_revision = 'be4dcb9eede6'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('zips_grouped',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('zips_grouped_id', sa.Integer(), nullable=False),
sa.Column('zip5', sa.Text(), nullable=True),
sa.Column('state_abbreviation', sa.Text(), nullable=True),
sa.Column('county_number', sa.Text(), nullable=True),
sa.Column('congressional_district_no', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('zips_grouped_id')
)
op.create_index(op.f('ix_zips_grouped_zip5'), 'zips_grouped', ['zip5'], unique=False)
op.create_index('ix_zips_zip5_state_abbreviation_county_number', 'zips', ['zip5', 'state_abbreviation', 'county_number'], unique=False)
# Populating the new table
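# Note: per the CASE expression below, a zip5 whose rows do not share exactly one distinct
# congressional district (including all-NULL districts) gets the placeholder '90'; zips with a
# single district have the real value copied in by the second UPDATE.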
op.execute("""
INSERT INTO zips_grouped (zip5, state_abbreviation, county_number)
SELECT zip5, state_abbreviation, county_number
FROM zips
GROUP BY zip5, state_abbreviation, county_number;
WITH district_counts AS (
SELECT zip5, COUNT(DISTINCT zips.congressional_district_no) AS cd_count
FROM zips
GROUP BY zip5)
UPDATE zips_grouped
SET created_at = NOW(),
updated_at = NOW(),
congressional_district_no = CASE WHEN cd_count <> 1
THEN '90'
END
FROM district_counts AS dc
WHERE dc.zip5 = zips_grouped.zip5;
UPDATE zips_grouped
SET congressional_district_no = zips.congressional_district_no
FROM zips
WHERE zips_grouped.congressional_district_no IS NULL
AND zips.zip5 = zips_grouped.zip5;
""")
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_zips_zip5_state_abbreviation_county_number', table_name='zips')
op.drop_index(op.f('ix_zips_grouped_zip5'), table_name='zips_grouped')
op.drop_table('zips_grouped')
# ### end Alembic commands ###
|
cc0-1.0
|
3be9db389d7f7fc2475573d2df485544
| 33.3125
| 139
| 0.62623
| 3.465909
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/1e0b1d3e3cca_d_model_to_text.py
|
2
|
3725
|
"""d-model-to-text
Revision ID: 1e0b1d3e3cca
Revises: 1ae491ca0925
Create Date: 2016-09-08 09:34:56.153584
"""
# revision identifiers, used by Alembic.
revision = '1e0b1d3e3cca'
down_revision = '1ae491ca0925'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('award_financial_assistance', 'face_value_loan_guarantee',
existing_type=sa.NUMERIC(),
type_=sa.Text(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'federal_action_obligation',
existing_type=sa.NUMERIC(),
type_=sa.Text(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'non_federal_funding_amount',
existing_type=sa.NUMERIC(),
type_=sa.Text(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'original_loan_subsidy_cost',
existing_type=sa.NUMERIC(),
type_=sa.Text(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'record_type',
existing_type=sa.INTEGER(),
type_=sa.Text(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'total_funding_amount',
existing_type=sa.NUMERIC(),
type_=sa.Text(),
existing_nullable=True)
op.alter_column('award_procurement', 'current_total_value_award',
existing_type=sa.NUMERIC(),
type_=sa.Text(),
existing_nullable=True)
op.alter_column('award_procurement', 'potential_total_value_awar',
existing_type=sa.NUMERIC(),
type_=sa.Text(),
existing_nullable=True)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('award_procurement', 'potential_total_value_awar',
existing_type=sa.Text(),
type_=sa.NUMERIC(),
existing_nullable=True)
op.alter_column('award_procurement', 'current_total_value_award',
existing_type=sa.Text(),
type_=sa.NUMERIC(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'total_funding_amount',
existing_type=sa.Text(),
type_=sa.NUMERIC(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'record_type',
existing_type=sa.Text(),
type_=sa.INTEGER(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'original_loan_subsidy_cost',
existing_type=sa.Text(),
type_=sa.NUMERIC(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'non_federal_funding_amount',
existing_type=sa.Text(),
type_=sa.NUMERIC(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'federal_action_obligation',
existing_type=sa.Text(),
type_=sa.NUMERIC(),
existing_nullable=True)
op.alter_column('award_financial_assistance', 'face_value_loan_guarantee',
existing_type=sa.Text(),
type_=sa.NUMERIC(),
existing_nullable=True)
### end Alembic commands ###
|
cc0-1.0
|
74726afb40f8e62139b8483759fa6a92
| 35.519608
| 79
| 0.587114
| 3.770243
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactbroker/helpers/filters_helper.py
|
1
|
6379
|
from sqlalchemy import or_, and_
from flask import g
from dataactcore.models.lookups import FILE_TYPE_DICT_LETTER_ID, RULE_SEVERITY_DICT
from dataactcore.models.domainModels import CGAC, FREC
from dataactcore.models.errorModels import PublishedErrorMetadata, ErrorMetadata
from dataactcore.models.jobModels import Submission
from dataactcore.models.validationModels import RuleSql, RuleSetting
from dataactcore.utils.responseException import ResponseException
from dataactcore.utils.statusCode import StatusCode
def agency_filter(sess, query, cgac_model, frec_model, agency_list):
""" Given the provided query, add a filter by agencies provided the agency list. Note that this does not include
the additional permissions filter listed in this file.
Arguments:
sess: the database connection
query: the sqlalchemy query to apply the filters to
cgac_model: the model to apply the cgacs filter
frec_model: the model to apply the frecs filter
agency_list: list of strings representing the agency codes to filter with
Raises:
ResponseException: if any of the strings in the agency_list are invalid
Returns:
the same queryset provided with agency filters included
"""
agency_filters = []
cgac_codes = [cgac_code for cgac_code in agency_list if isinstance(cgac_code, str) and len(cgac_code) == 3]
frec_codes = [frec_code for frec_code in agency_list if isinstance(frec_code, str) and len(frec_code) == 4]
if len(cgac_codes) + len(frec_codes) != len(agency_list):
raise ResponseException('All codes in the agency_codes filter must be valid agency codes',
StatusCode.CLIENT_ERROR)
# If the number of CGACs or FRECs returned from a query using the codes doesn't match the length of
# each list (ignoring duplicates) then something included wasn't a valid agency
cgac_list = set(cgac_codes)
frec_list = set(frec_codes)
if (cgac_list and sess.query(CGAC).filter(CGAC.cgac_code.in_(cgac_list)).count() != len(cgac_list)) or \
(frec_list and sess.query(FREC).filter(FREC.frec_code.in_(frec_list)).count() != len(frec_list)):
raise ResponseException("All codes in the agency_codes filter must be valid agency codes",
StatusCode.CLIENT_ERROR)
if len(cgac_list) > 0:
agency_filters.append(cgac_model.cgac_code.in_(cgac_list))
if len(frec_list) > 0:
agency_filters.append(frec_model.frec_code.in_(frec_list))
return query.filter(or_(*agency_filters))
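# Illustrative usage (codes are hypothetical): agency_filter(sess, query, CGAC, FREC, ['012', '1601'])
# keeps rows whose CGAC code is '012' (3 characters) or whose FREC code is '1601' (4 characters).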
def permissions_filter(query):
""" Given the provided query, add a filter to only include agencies the user has access to.
Arguments:
query: the sqlalchemy query to apply the filters to
Returns:
the same queryset provided with permissions filter included
"""
if not g.user.website_admin:
affiliation_filters = []
cgac_codes = [aff.cgac.cgac_code for aff in g.user.affiliations if aff.cgac]
frec_codes = [aff.frec.frec_code for aff in g.user.affiliations if aff.frec]
affiliation_filters.append(Submission.user_id == g.user.user_id)
if cgac_codes:
affiliation_filters.append(Submission.cgac_code.in_(cgac_codes))
if frec_codes:
affiliation_filters.append(Submission.frec_code.in_(frec_codes))
query = query.filter(or_(*affiliation_filters))
return query
def file_filter(query, file_model, files):
""" Given the provided query, add a filter by files provided the files list.
Arguments:
query: the sqlalchemy query to apply the filters to
file_model: the model to apply the file filter
files: list of file types to filter with
Returns:
the same queryset provided with file filters included
"""
model_file_type_id = {
PublishedErrorMetadata: 'file_type_id',
ErrorMetadata: 'file_type_id',
RuleSql: 'file_id',
RuleSetting: 'file_id'
}
if file_model not in model_file_type_id:
valid_file_models = [model_file_type.__name__ for model_file_type in model_file_type_id.keys()]
error_message = 'Invalid file model. Use one of the following instead: {}.'
raise ResponseException(error_message.format(', '.join(sorted(valid_file_models))))
file_type_filters = []
if files:
for file_type in files:
file_id = getattr(file_model, model_file_type_id[file_model])
target_file_id = getattr(file_model, 'target_{}'.format(model_file_type_id[file_model]))
if file_type in ['A', 'B', 'C']:
file_type_filters.append(and_(file_id == FILE_TYPE_DICT_LETTER_ID[file_type],
target_file_id.is_(None)))
else:
file_types = file_type.split('-')[1]
# Append both orders of the source/target files to the list
file_type_filters.append(and_(file_id == FILE_TYPE_DICT_LETTER_ID[file_types[:1]],
target_file_id == FILE_TYPE_DICT_LETTER_ID[file_types[1:]]))
file_type_filters.append(and_(file_id == FILE_TYPE_DICT_LETTER_ID[file_types[1:]],
target_file_id == FILE_TYPE_DICT_LETTER_ID[file_types[:1]]))
return query.filter(or_(*file_type_filters))
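# Illustrative usage (the file labels are hypothetical): file_filter(query, RuleSql, ['A', 'cross-AB'])
# keeps single-file rules for file A plus cross-file rules pairing A and B in either source/target order.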
def rule_severity_filter(query, error_level, error_model=ErrorMetadata):
""" Given the provided query, add a filter by files provided the files list.
Arguments:
query: the sqlalchemy query to apply the filters to
error_level: the error level to filter on (could be 'error' or 'warning')
error_model: the model to apply the filter to (must have a severity_id field)
Returns:
the same queryset provided with rule severity filter included
"""
# If the error level isn't "mixed" add a filter on which severity to pull
if error_level == 'error':
query = query.filter(error_model.severity_id == RULE_SEVERITY_DICT['fatal'])
elif error_level == 'warning':
query = query.filter(error_model.severity_id == RULE_SEVERITY_DICT['warning'])
return query
|
cc0-1.0
|
20df9b222bf9caa9aa5f5cb7b4421671
| 45.904412
| 116
| 0.649318
| 3.806086
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/a98cd1871ea9_remove_user_status_id_from_user.py
|
1
|
1073
|
"""remove user_status_id from user
Revision ID: a98cd1871ea9
Revises: e97127c44797
Create Date: 2016-12-22 11:59:35.173573
"""
# revision identifiers, used by Alembic.
revision = 'a98cd1871ea9'
down_revision = 'e97127c44797'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('users_user_status_id_fkey', 'users', type_='foreignkey')
op.drop_column('users', 'user_status_id')
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('user_status_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('users_user_status_id_fkey', 'users', 'user_status', ['user_status_id'], ['user_status_id'])
### end Alembic commands ###
|
cc0-1.0
|
98f6512f4c3b8fcbb730ef7c93f83dda
| 24.547619
| 118
| 0.684995
| 3.261398
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_script_read_zips.py
|
1
|
6618
|
from io import StringIO
from dataactvalidator.scripts.read_zips import update_state_congr_table_census, group_zips
from dataactcore.models.domainModels import StateCongressional, Zips, ZipsGrouped
def test_parse_census_district_file(database):
census_file_mock = StringIO("""state_code,congressional_district_no,census_year\nA,1,20""")
sess = database.session
update_state_congr_table_census(census_file_mock, sess)
state_congressional = sess.query(StateCongressional).one_or_none()
assert state_congressional.state_code == 'A'
assert state_congressional.congressional_district_no == '01'
assert state_congressional.census_year == 20
def test_group_zips(database):
""" Testing the grouping of zips. """
sess = database.session
# Only difference is the zip_last4, these will be merged together
zip_same1 = Zips(zip5='12345', zip_last4='6789', state_abbreviation='VA', county_number='000',
congressional_district_no='01')
zip_same2 = Zips(zip5='12345', zip_last4='6780', state_abbreviation='VA', county_number='000',
congressional_district_no='01')
# Different states, same everything else
zip_state1 = Zips(zip5='54321', zip_last4='6789', state_abbreviation='VA', county_number='000',
congressional_district_no='01')
zip_state2 = Zips(zip5='54321', zip_last4='6780', state_abbreviation='WA', county_number='000',
congressional_district_no='01')
# Different county codes, same everything else
zip_county1 = Zips(zip5='11111', zip_last4='1111', state_abbreviation='VA', county_number='000',
congressional_district_no='01')
zip_county2 = Zips(zip5='11111', zip_last4='1112', state_abbreviation='VA', county_number='001',
congressional_district_no='01')
# Everything matches except for congressional district
zip_cd1 = Zips(zip5='22222', zip_last4='2222', state_abbreviation='VA', county_number='000',
congressional_district_no='01')
zip_cd2 = Zips(zip5='22222', zip_last4='2223', state_abbreviation='VA', county_number='000',
congressional_district_no='02')
# Different states, different congressional district
zip_state_cd1 = Zips(zip5='33333', zip_last4='3333', state_abbreviation='VA', county_number='000',
congressional_district_no='01')
zip_state_cd2 = Zips(zip5='33333', zip_last4='3334', state_abbreviation='WA', county_number='000',
congressional_district_no='02')
# Null congressional district
zip_null_cd = Zips(zip5='44444', zip_last4='4444', state_abbreviation='WA', county_number='000',
congressional_district_no=None)
sess.add_all([zip_same1, zip_same2, zip_state1, zip_state2, zip_county1, zip_county2, zip_cd1, zip_cd2,
zip_state_cd1, zip_state_cd2, zip_null_cd])
sess.commit()
# Creating the temp tables to use for testing
sess.execute("""
CREATE TABLE temp_zips AS
SELECT * FROM zips;
CREATE TABLE temp_zips_grouped (LIKE zips_grouped INCLUDING ALL);
""")
sess.commit()
group_zips(sess)
# Moving into zips_grouped for easier parsing
sess.execute("""
INSERT INTO zips_grouped
SELECT *
FROM temp_zips_grouped
""")
sess.commit()
# Combined first set of zips
zips = sess.query(ZipsGrouped).filter_by(zip5=zip_same1.zip5).all()
assert len(zips) == 1
assert zips[0].zip5 == zip_same1.zip5
assert zips[0].state_abbreviation == zip_same1.state_abbreviation
assert zips[0].county_number == zip_same1.county_number
assert zips[0].congressional_district_no == zip_same1.congressional_district_no
# Different states, same everything else
zips = sess.query(ZipsGrouped).filter_by(zip5=zip_state1.zip5).order_by(ZipsGrouped.state_abbreviation).all()
assert len(zips) == 2
assert zips[0].zip5 == zip_state1.zip5
assert zips[0].state_abbreviation == zip_state1.state_abbreviation
assert zips[0].county_number == zip_state1.county_number
assert zips[0].congressional_district_no == zip_state1.congressional_district_no
assert zips[1].zip5 == zip_state2.zip5
assert zips[1].state_abbreviation == zip_state2.state_abbreviation
assert zips[1].county_number == zip_state2.county_number
assert zips[1].congressional_district_no == zip_state2.congressional_district_no
# Different counties, same everything else
zips = sess.query(ZipsGrouped).filter_by(zip5=zip_county1.zip5).order_by(ZipsGrouped.county_number).all()
assert len(zips) == 2
assert zips[0].zip5 == zip_county1.zip5
assert zips[0].state_abbreviation == zip_county1.state_abbreviation
assert zips[0].county_number == zip_county1.county_number
assert zips[0].congressional_district_no == zip_county1.congressional_district_no
assert zips[1].zip5 == zip_county2.zip5
assert zips[1].state_abbreviation == zip_county2.state_abbreviation
assert zips[1].county_number == zip_county2.county_number
assert zips[1].congressional_district_no == zip_county2.congressional_district_no
# Different congressional districts
zips = sess.query(ZipsGrouped).filter_by(zip5=zip_cd1.zip5).all()
assert len(zips) == 1
assert zips[0].zip5 == zip_cd1.zip5
assert zips[0].state_abbreviation == zip_cd1.state_abbreviation
assert zips[0].county_number == zip_cd1.county_number
assert zips[0].congressional_district_no == '90'
# Different states, different congressional districts
zips = sess.query(ZipsGrouped).filter_by(zip5=zip_state_cd1.zip5).order_by(ZipsGrouped.state_abbreviation).all()
assert len(zips) == 2
assert zips[0].zip5 == zip_state_cd1.zip5
assert zips[0].state_abbreviation == zip_state_cd1.state_abbreviation
assert zips[0].county_number == zip_state_cd1.county_number
assert zips[0].congressional_district_no == '90'
assert zips[1].zip5 == zip_state_cd2.zip5
assert zips[1].state_abbreviation == zip_state_cd2.state_abbreviation
assert zips[1].county_number == zip_state_cd2.county_number
assert zips[1].congressional_district_no == '90'
# Null congressional district
zips = sess.query(ZipsGrouped).filter_by(zip5=zip_null_cd.zip5).all()
assert len(zips) == 1
assert zips[0].zip5 == zip_null_cd.zip5
assert zips[0].state_abbreviation == zip_null_cd.state_abbreviation
assert zips[0].county_number == zip_null_cd.county_number
assert zips[0].congressional_district_no == '90'
|
cc0-1.0
|
0e8fb45dd235711f6bef273140a381bf
| 47.306569
| 116
| 0.68359
| 3.079572
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/c4d42e86c655_add_descriptions_to_published_.py
|
1
|
3105
|
"""Add descriptions to (Published)AwardFinancialAssistance
Revision ID: c4d42e86c655
Revises: 668d9fa93acb
Create Date: 2018-04-04 11:00:18.103961
"""
# revision identifiers, used by Alembic.
revision = 'c4d42e86c655'
down_revision = '668d9fa93acb'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('award_financial_assistance', sa.Column('action_type_description', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('assistance_type_desc', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('business_funds_ind_desc', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('business_types_desc', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('correction_delete_ind_desc', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('record_type_description', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('action_type_description', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('assistance_type_desc', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('business_funds_ind_desc', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('business_types_desc', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('correction_delete_ind_desc', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('record_type_description', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('published_award_financial_assistance', 'record_type_description')
op.drop_column('published_award_financial_assistance', 'correction_delete_ind_desc')
op.drop_column('published_award_financial_assistance', 'business_types_desc')
op.drop_column('published_award_financial_assistance', 'business_funds_ind_desc')
op.drop_column('published_award_financial_assistance', 'assistance_type_desc')
op.drop_column('published_award_financial_assistance', 'action_type_description')
op.drop_column('award_financial_assistance', 'record_type_description')
op.drop_column('award_financial_assistance', 'correction_delete_ind_desc')
op.drop_column('award_financial_assistance', 'business_types_desc')
op.drop_column('award_financial_assistance', 'business_funds_ind_desc')
op.drop_column('award_financial_assistance', 'assistance_type_desc')
op.drop_column('award_financial_assistance', 'action_type_description')
### end Alembic commands ###
|
cc0-1.0
|
32bf360dee2da2c2da20d37c614e686f
| 49.080645
| 124
| 0.732045
| 3.251309
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/scripts/backfill_ppop_scope_fabs.py
|
1
|
2049
|
import logging
from dataactcore.interfaces.db import GlobalDB
from dataactcore.broker_logging import configure_logging
from dataactvalidator.health_check import create_app
logger = logging.getLogger(__name__)
BACKFILL_FABS_PPOP_SCOPE_SQL_1 = r"""
UPDATE published_fabs
SET place_of_performance_scope =
CASE WHEN place_of_performance_code ~ '^00\*{5}$'
THEN 'Multi-state'
WHEN place_of_performance_code ~ '^[a-zA-Z]{2}\*{5}$'
THEN 'State-wide'
WHEN place_of_performance_code ~ '^[a-zA-Z]{2}\*\*\d{3}$'
THEN 'County-wide'
WHEN UPPER(place_of_performance_code) = '00FORGN'
THEN 'Foreign'
WHEN place_of_performance_code ~ '^[a-zA-Z]{2}\d{4}[\dRr]$'
THEN 'City-wide'
END
WHERE (place_of_performance_zip4a IS NULL
AND place_of_performance_scope IS NULL);
"""
BACKFILL_FABS_PPOP_SCOPE_SQL_2 = r"""
UPDATE published_fabs
SET place_of_performance_scope =
CASE WHEN LOWER(place_of_performance_zip4a) = 'city-wide'
THEN 'City-wide'
WHEN place_of_performance_zip4a ~ '^\d{5}(-?\d{4})?$'
THEN 'Single ZIP Code'
END
WHERE (place_of_performance_code ~ '^[a-zA-Z]{2}\d{4}[\dRr]$'
AND place_of_performance_scope IS NULL);
"""
if __name__ == '__main__':
configure_logging()
with create_app().app_context():
sess = GlobalDB.db().session
affected = 0
logger.info('Backfilling empty place_of_performance_scope values in the fabs table (part i).')
executed = sess.execute(BACKFILL_FABS_PPOP_SCOPE_SQL_1)
affected += executed.rowcount
sess.commit()
logger.info('Backfilling empty place_of_performance_scope values in the fabs table (part ii).')
executed = sess.execute(BACKFILL_FABS_PPOP_SCOPE_SQL_2)
affected += executed.rowcount
sess.commit()
logger.info('Backfill completed, {} rows affected\n'.format(affected))
sess.close()
|
cc0-1.0
|
83b42b368ab3ea3d846adf7ce02c7fa6
| 33.728814
| 103
| 0.607125
| 3.426421
| false
| false
| true
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_fabs34_2.py
|
1
|
1873
|
from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs34_2'
def test_column_headers(database):
expected_subset = {'row_number', 'period_of_performance_star', 'period_of_performance_curr',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" While they are optional fields, if either PeriodOfPerformanceCurrentEndDate or PeriodOfPerformanceStartDate is
provided, both fields must be provided.
"""
fabs_1 = FABSFactory(period_of_performance_star=None, period_of_performance_curr=None,
correction_delete_indicatr='')
fabs_2 = FABSFactory(period_of_performance_star='20120724', period_of_performance_curr='20120724',
correction_delete_indicatr='c')
# Ignore correction delete indicator of D
fabs_3 = FABSFactory(period_of_performance_star=None, period_of_performance_curr='20120724',
correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3])
assert errors == 0
def test_failure(database):
""" While they are optional fields, if either PeriodOfPerformanceCurrentEndDate or PeriodOfPerformanceStartDate is
provided, both fields must be provided.
"""
fabs_1 = FABSFactory(period_of_performance_star=None, period_of_performance_curr='20120724',
correction_delete_indicatr='')
fabs_2 = FABSFactory(period_of_performance_star='20120725', period_of_performance_curr=None,
correction_delete_indicatr='C')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2])
assert errors == 2
|
cc0-1.0
|
170a2ca7be7e4e98dd4bb36a50fa7a2a
| 45.825
| 118
| 0.687667
| 3.814664
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/scripts/populate_published_comments.py
|
1
|
2470
|
import logging
from sqlalchemy import func
from dataactcore.interfaces.db import GlobalDB
from dataactcore.broker_logging import configure_logging
from dataactcore.models.jobModels import Submission, PublishHistory, Comment, PublishedComment
from dataactvalidator.health_check import create_app
from dataactcore.models.userModel import User # noqa
logger = logging.getLogger(__name__)
if __name__ == '__main__':
sess = GlobalDB.db().session
configure_logging()
with create_app().app_context():
logger.info('Moving published comments')
# Create a CTE of the max publish history date for DABS submissions
max_publish_history = sess.query(func.max(PublishHistory.updated_at).label('max_updated_at'),
PublishHistory.submission_id.label('submission_id')). \
join(Submission, PublishHistory.submission_id == Submission.submission_id). \
filter(Submission.is_fabs.is_(False)). \
group_by(PublishHistory.submission_id).cte('max_publish_history')
# Get all comments that were written before the latest publication for all published/updated submissions
publish_history_list = sess.query(Comment). \
join(max_publish_history, max_publish_history.c.submission_id == Comment.submission_id). \
filter(Comment.updated_at < max_publish_history.c.max_updated_at).\
order_by(Comment.submission_id, Comment.file_type_id).all()
# Create a list of comments and a list of all submissions involved
comments_list = []
submissions_list = []
for obj in publish_history_list:
tmp_obj = obj.__dict__
tmp_obj.pop('_sa_instance_state')
tmp_obj.pop('created_at')
tmp_obj.pop('updated_at')
tmp_obj.pop('comment_id')
comments_list.append(tmp_obj)
if tmp_obj['submission_id'] not in submissions_list:
submissions_list.append(tmp_obj['submission_id'])
# Delete all comments from the submissions we're inserting for
sess.query(PublishedComment).filter(PublishedComment.submission_id.in_(submissions_list)).\
delete(synchronize_session=False)
# Save all the objects in the published comment table
sess.bulk_save_objects([PublishedComment(**comment) for comment in comments_list])
sess.commit()
logger.info('Published comments moved')
|
cc0-1.0
|
6b96bedf5afd113f9382c7a8a6deab17
| 41.586207
| 112
| 0.664372
| 4.207836
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_fabs38_2_1.py
|
1
|
2802
|
from tests.unit.dataactcore.factories.domain import OfficeFactory
from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs38_2_1'
def test_column_headers(database):
expected_subset = {'row_number', 'funding_office_code', 'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test when provided, FundingOfficeCode must be a valid value from the Federal Hierarchy, including being
designated specifically as a Funding Office in the hierarchy.
"""
office_1 = OfficeFactory(office_code='12345a', contract_funding_office=True,
financial_assistance_funding_office=False)
office_2 = OfficeFactory(office_code='12345b', contract_funding_office=False,
financial_assistance_funding_office=True)
fabs_1 = FABSFactory(funding_office_code='12345a', correction_delete_indicatr='')
# test case insensitive
fabs_2 = FABSFactory(funding_office_code='12345A', correction_delete_indicatr='c')
fabs_3 = FABSFactory(funding_office_code='', correction_delete_indicatr=None)
fabs_4 = FABSFactory(funding_office_code=None, correction_delete_indicatr='C')
# Testing second type of funding office
fabs_5 = FABSFactory(funding_office_code='12345b', correction_delete_indicatr='')
# Ignore correction delete indicator of D
fabs_6 = FABSFactory(funding_office_code='1234567', correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[office_1, office_2, fabs_1, fabs_2, fabs_3, fabs_4, fabs_5,
fabs_6])
assert errors == 0
def test_failure(database):
""" Test fail when provided, FundingOfficeCode must be a valid value from the Federal Hierarchy, including being
designated specifically as a Funding Office in the hierarchy.
"""
office_1 = OfficeFactory(office_code='123456', contract_funding_office=True,
financial_assistance_funding_office=True)
office_2 = OfficeFactory(office_code='987654', contract_funding_office=False,
financial_assistance_funding_office=False)
fabs_1 = FABSFactory(funding_office_code='12345', correction_delete_indicatr=None)
fabs_2 = FABSFactory(funding_office_code='1234567', correction_delete_indicatr='')
# Test fail if funding office is false even if code matches
fabs_3 = FABSFactory(funding_office_code='987654', correction_delete_indicatr='c')
errors = number_of_errors(_FILE, database, models=[office_1, office_2, fabs_1, fabs_2, fabs_3])
assert errors == 3
|
cc0-1.0
|
d9d7474fc0ff9e6dbe64c7f945598bde
| 48.157895
| 116
| 0.697716
| 3.506884
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/models/lookups.py
|
1
|
22140
|
# This file defines a series of constants that represent the values used in the
# broker's "helper" tables. Rather than define the values in the db setup scripts
# and then make db calls to lookup the surrogate keys, we'll define everything
# here, in a file that can be used by the db setup scripts *and* the application
# code.
from collections import namedtuple
from dataactcore.models.stagingModels import (AwardFinancialAssistance, AwardFinancial, Appropriation,
ObjectClassProgramActivity, AwardProcurement, FABS)
LookupType = namedtuple('LookupType', ['id', 'name', 'desc'])
LookupFileType = namedtuple('LookupFileType', ['id', 'name', 'desc', 'letter', 'order', 'crossfile', 'model'])
FILE_STATUS = [
LookupType(1, 'complete', 'File has been processed'),
LookupType(2, 'header_error', 'The file has errors in the header row'),
LookupType(3, 'unknown_error', 'An unknown error has occurred with this file'),
LookupType(4, 'single_row_error', 'Error occurred in job manager'),
LookupType(5, 'job_error', 'File has not yet been validated'),
LookupType(6, 'incomplete', 'File has not yet been validated'),
LookupType(7, 'encoding_error', 'File contains invalid characters that could not be validated'),
LookupType(8, 'row_count_error', 'Raw file row count does not match the number of rows validated'),
LookupType(9, 'file_type_error', 'Invalid file type. Valid file types include .csv and .txt')
]
FILE_STATUS_DICT = {item.name: item.id for item in FILE_STATUS}
FILE_STATUS_DICT_ID = {item.id: item.name for item in FILE_STATUS}
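# Illustrative example: the paired dicts above are what lets application code translate
# between a status name and its surrogate key without a database lookup, e.g.:
assert FILE_STATUS_DICT['header_error'] == 2
assert FILE_STATUS_DICT_ID[2] == 'header_error'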
ERROR_TYPE = [
LookupType(1, 'type_error', 'The value provided was of the wrong type. Note that all type errors in a line'
' must be fixed before the rest of the validation logic is applied to that line.'),
LookupType(2, 'required_error', 'This field is required for all submissions but was not provided in this row.'),
LookupType(3, 'value_error', 'The value provided was invalid.'),
LookupType(4, 'read_error', 'Could not parse this record correctly.'),
LookupType(5, 'write_error', 'Could not write this record into the staging table.'),
LookupType(6, 'rule_failed', 'A rule failed for this value.'),
LookupType(7, 'length_error', 'Value was longer than allowed length.'),
LookupType(8, 'field_format_error', 'Date should follow the YYYYMMDD format.'),
LookupType(9, 'blank_file_error', 'File does not contain data. For files A and B, this must be addressed prior to'
' publication/certification. Blank file C does not prevent'
' publication/certification.')
]
ERROR_TYPE_DICT = {item.name: item.id for item in ERROR_TYPE}
JOB_STATUS = [
LookupType(1, 'waiting', 'check dependency table'),
LookupType(2, 'ready', 'can be assigned'),
LookupType(3, 'running', 'job is currently in progress'),
LookupType(4, 'finished', 'job is complete'),
LookupType(5, 'invalid', 'job is invalid'),
LookupType(6, 'failed', 'job failed to complete')
]
JOB_STATUS_DICT = {item.name: item.id for item in JOB_STATUS}
JOB_STATUS_DICT_ID = {item.id: item.name for item in JOB_STATUS}
JOB_TYPE = [
LookupType(1, 'file_upload', 'file must be uploaded to S3'),
LookupType(2, 'csv_record_validation', 'do record level validation and add to staging table'),
LookupType(4, 'validation', 'new information must be validated')
]
JOB_TYPE_DICT = {item.name: item.id for item in JOB_TYPE}
JOB_TYPE_DICT_ID = {item.id: item.name for item in JOB_TYPE}
PUBLISH_STATUS = [
LookupType(1, 'unpublished', 'Has not yet been moved to data store'),
LookupType(2, 'published', 'Has been moved to data store'),
LookupType(3, 'updated', 'Submission was updated after being published'),
LookupType(4, 'publishing', 'Submission is being published'),
LookupType(5, 'reverting', 'Submission is being reverted to certified status')
]
PUBLISH_STATUS_DICT = {item.name: item.id for item in PUBLISH_STATUS}
PUBLISH_STATUS_DICT_ID = {item.id: item.name for item in PUBLISH_STATUS}
FILE_TYPE = [
LookupFileType(1, 'appropriations', '', 'A', 1, True, Appropriation),
LookupFileType(2, 'program_activity', '', 'B', 2, True, ObjectClassProgramActivity),
LookupFileType(3, 'award_financial', '', 'C', 3, True, AwardFinancial),
LookupFileType(4, 'award', '', 'D2', 4, True, AwardFinancialAssistance),
LookupFileType(5, 'award_procurement', '', 'D1', 5, True, AwardProcurement),
LookupFileType(6, 'executive_compensation', '', 'E', None, False, None),
LookupFileType(7, 'sub_award', '', 'F', None, False, None),
LookupFileType(8, 'fabs', '', 'FABS', None, False, FABS)
]
FILE_TYPE_DICT = {item.name: item.id for item in FILE_TYPE}
FILE_TYPE_DICT_ID = {item.id: item.name for item in FILE_TYPE}
FILE_TYPE_DICT_LETTER = {item.id: item.letter for item in FILE_TYPE}
FILE_TYPE_DICT_LETTER_ID = {item.letter: item.id for item in FILE_TYPE}
FILE_TYPE_DICT_LETTER_NAME = {item.letter: item.name for item in FILE_TYPE}
FILE_TYPE_DICT_NAME_LETTER = {item.name: item.letter for item in FILE_TYPE}
SUBMISSION_FILENAMES = {
'A': 'SubID-{submission_id}_File-A_{FYP}_{raw_filename}_{timestamp}{ext}',
'B': 'SubID-{submission_id}_File-B_{FYP}_{raw_filename}_{timestamp}{ext}',
'C': 'SubID-{submission_id}_File-C_{FYP}_{raw_filename}_{timestamp}{ext}',
'D1': 'SubID-{submission_id}_File-D1_{FYP}_{start}_{end}_{agency_type}_{timestamp}{ext}',
'D2': 'SubID-{submission_id}_File-D2_{FYP}_{start}_{end}_{agency_type}_{timestamp}{ext}',
'E': 'SubID-{submission_id}_File-E_{FYP}_{timestamp}.csv',
'F': 'SubID-{submission_id}_File-F_{FYP}_{timestamp}.csv',
'FABS': 'SubID-{submission_id}_File-FABS_{raw_filename}_{timestamp}{ext}',
'FABS_publish': 'SubID-{submission_id}_Published-FABS_{timestamp}.csv',
}
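# Illustrative example (hypothetical values): each entry is an ordinary str.format template.
assert (SUBMISSION_FILENAMES['FABS_publish'].format(submission_id=1234, timestamp='20240101T000000')
        == 'SubID-1234_Published-FABS_20240101T000000.csv')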
DETACHED_FILENAMES = {
'A': 'File-A_{FYP}_{timestamp}.csv',
'D1': 'File-D1_{start}_{end}_{agency_type}_{timestamp}{ext}',
'D2': 'File-D2_{start}_{end}_{agency_type}_{timestamp}{ext}'
}
REPORT_FILENAMES = {
'PRE-DAIMS 2.0': {
'file': 'submission_{submission_id}_{file_type}_{report_type}_report.csv',
'cross-file': 'submission_{submission_id}_cross_{report_type}{file_type}_{cross_type}.csv'
},
'DAIMS 2.0': {
'file': 'submission_{submission_id}_File_{file_letter}_{file_type}_{report_type}report.csv',
'cross-file': 'submission_{submission_id}_crossfile_{report_type}'
'File_{file_letter}_to_{cross_letter}_{file_type}_{cross_type}.csv',
},
'DEV-8325': {
'file': 'SubID-{submission_id}_File-{file_letter}-{report_type}-report{FYP}.csv',
'cross-file': 'SubID-{submission_id}_File-{file_letter}-to-{cross_letter}-crossfile-{report_type}-report'
'{FYP}.csv',
}
}
PERMISSION_TYPES = [
LookupType(1, 'reader', 'This user is allowed to view any submission for their agency'),
LookupType(2, 'writer', 'This user is allowed to create and edit any submission for their agency'),
LookupType(3, 'submitter', 'This user is allowed to certify and submit any submission for their agency'),
LookupType(4, 'editfabs', 'This user is allowed to create and edit any FABS data for their agency'),
LookupType(5, 'fabs', 'This user is allowed to publish any FABS data for their agency')
]
PERMISSION_TYPE_DICT = {item.name: item.id for item in PERMISSION_TYPES[:3]}
ALL_PERMISSION_TYPES_DICT = {item.name: item.id for item in PERMISSION_TYPES}
PERMISSION_TYPE_DICT_ID = {item.id: item.name for item in PERMISSION_TYPES}
PERMISSION_SHORT_DICT = {item.name[0]: item.id for item in PERMISSION_TYPES}
# These are split into DABS and FABS permissions but having DABS permissions gives read-access to FABS submissions
DABS_PERMISSION_ID_LIST = [item.id for item in PERMISSION_TYPES[:3]]
FABS_PERMISSION_ID_LIST = [item.id for item in PERMISSION_TYPES[3:]]
# These are split into groups between DABS and FABS (not to be confused with just DABS writer/submitter)
WRITER_ID_LIST = [item.id for item in PERMISSION_TYPES[1:]]
SUBMITTER_ID_LIST = [item.id for item in [PERMISSION_TYPES[2], PERMISSION_TYPES[4]]]
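# Illustrative example of how the groupings above compose: 'editfabs' can write FABS data
# but is not a submitter.
assert ALL_PERMISSION_TYPES_DICT['editfabs'] in FABS_PERMISSION_ID_LIST
assert ALL_PERMISSION_TYPES_DICT['editfabs'] in WRITER_ID_LIST
assert ALL_PERMISSION_TYPES_DICT['editfabs'] not in SUBMITTER_ID_LIST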
FIELD_TYPE = [
LookupType(1, 'INT', 'integer type'),
LookupType(2, 'DECIMAL', 'decimal type '),
LookupType(3, 'BOOLEAN', 'yes/no'),
LookupType(4, 'STRING', 'string type'),
LookupType(5, 'LONG', 'long integer'),
LookupType(6, 'DATE', 'date type')
]
FIELD_TYPE_DICT = {item.name: item.id for item in FIELD_TYPE}
FIELD_TYPE_DICT_ID = {item.id: item.name for item in FIELD_TYPE}
RULE_SEVERITY = [
LookupType(1, 'warning', 'warning'),
LookupType(2, 'fatal', 'fatal error')
]
RULE_SEVERITY_DICT = {item.name: item.id for item in RULE_SEVERITY}
RULE_IMPACT = [
LookupType(1, 'low', 'low'),
LookupType(2, 'medium', 'medium'),
LookupType(3, 'high', 'high')
]
RULE_IMPACT_DICT = {item.name: item.id for item in RULE_IMPACT}
RULE_IMPACT_DICT_ID = {item.id: item.name for item in RULE_IMPACT}
SUBMISSION_TYPE = [
LookupType(1, 'all', 'Warning for all pages'),
LookupType(2, 'dabs', 'Warning for DABS pages'),
LookupType(3, 'fabs', 'Warning for FABS pages'),
LookupType(4, 'login', 'Warning for the login page')
]
SUBMISSION_TYPE_DICT = {item.name: item.id for item in SUBMISSION_TYPE}
ACTION_TYPE = [
LookupType(1, 'A', 'New'),
LookupType(2, 'B', 'Continuation'),
LookupType(3, 'C', 'Revision'),
LookupType(4, 'D', 'Adjustment to Completed Project'),
LookupType(5, 'E', 'Aggregate Mixed')
]
ACTION_TYPE_DICT = {item.name: item.desc for item in ACTION_TYPE}
ASSISTANCE_TYPE = [
LookupType(1, '02', 'block grant (A)'),
LookupType(2, '03', 'formula grant (A)'),
LookupType(3, '04', 'project grant (B)'),
LookupType(4, '05', 'cooperative agreement (B)'),
LookupType(5, '06', 'direct payment for specified use, as a subsidy or other non-reimbursable direct financial aid '
'(C)'),
LookupType(6, '07', 'direct loan (E)'),
LookupType(7, '08', 'guaranteed/insured loan (F)'),
LookupType(8, '09', 'insurance (G)'),
LookupType(9, '10', 'direct payment with unrestricted use (retirement, pension, veterans benefits, etc.) (D)'),
LookupType(10, '11', 'other reimbursable, contingent, intangible, or indirect financial assistance'),
]
ASSISTANCE_TYPE_DICT = {item.name: item.desc for item in ASSISTANCE_TYPE}
CORRECTION_DELETE_IND = [
LookupType(1, 'C', 'Correct an Existing Record'),
LookupType(2, 'D', 'Delete an Existing Record')
]
CORRECTION_DELETE_IND_DICT = {item.name: item.desc for item in CORRECTION_DELETE_IND}
RECORD_TYPE = [
LookupType(1, 1, 'Aggregate Record'),
LookupType(2, 2, 'Non-Aggregate Record'),
LookupType(3, 3, 'Non-Aggregate Record to an Individual Recipient (PII-Redacted)')
]
RECORD_TYPE_DICT = {item.name: item.desc for item in RECORD_TYPE}
BUSINESS_TYPE = [
LookupType(1, 'A', 'State Government'),
LookupType(2, 'B', 'County Government'),
LookupType(3, 'C', 'City or Township Government'),
LookupType(4, 'D', 'Special District Government'),
LookupType(5, 'E', 'Regional Organization'),
LookupType(6, 'F', 'U.S. Territory or Possession'),
LookupType(7, 'G', 'Independent School District'),
LookupType(8, 'H', 'Public/State Controlled Institution of Higher Education'),
LookupType(9, 'I', 'Indian/Native American Tribal Government (Federally-Recognized)'),
LookupType(10, 'J', 'Indian/Native American Tribal Government (Other than Federally-Recognized)'),
LookupType(11, 'K', 'Indian/Native American Tribal Designated Organization'),
LookupType(12, 'L', 'Public/Indian Housing Authority'),
LookupType(13, 'M', 'Nonprofit with 501C3 IRS Status (Other than an Institution of Higher Education)'),
LookupType(14, 'N', 'Nonprofit without 501C3 IRS Status (Other than an Institution of Higher Education)'),
LookupType(15, 'O', 'Private Institution of Higher Education'),
LookupType(16, 'P', 'Individual'),
LookupType(17, 'Q', 'For-Profit Organization (Other than Small Business)'),
LookupType(18, 'R', 'Small Business'),
LookupType(19, 'S', 'Hispanic-serving Institution'),
LookupType(20, 'T', 'Historically Black College or University (HBCU)'),
LookupType(21, 'U', 'Tribally Controlled College or University (TCCU)'),
LookupType(22, 'V', 'Alaska Native and Native Hawaiian Serving Institutions'),
LookupType(23, 'W', 'Non-domestic (non-U.S.) Entity'),
LookupType(24, 'X', 'Other')
]
BUSINESS_TYPE_DICT = {item.name: item.desc for item in BUSINESS_TYPE}
SAM_BUSINESS_TYPE = [
LookupType(1, 'A6', 'SBA Certified 8(a), Program Participant'),
LookupType(2, 'JT', 'SBA Certified 8(a), Joint Venture'),
LookupType(3, 'XX', 'SBA Certified HUBZone Small Business Concern'),
LookupType(4, 'A7', 'AbilityOne Non Profit Agency'),
LookupType(5, '2R', 'U.S. Federal Government'),
LookupType(6, '2F', 'U.S. State Government'),
LookupType(7, '12', 'U.S. Local Government'),
LookupType(8, '3I', 'Tribal Government'),
LookupType(9, 'CY', 'Foreign Government'),
LookupType(10, '20', 'Foreign Owned'),
LookupType(11, '1D', 'Small Agricultural Cooperative'),
LookupType(12, 'LJ', 'Limited Liability Company'),
LookupType(13, 'XS', 'Subchapter S Corporation'),
LookupType(14, 'MF', 'Manufacturer of Goods'),
LookupType(15, '2X', 'For Profit Organization'),
LookupType(16, 'A8', 'Non-Profit Organization'),
LookupType(17, '2U', 'Other Not For Profit Organization'),
LookupType(18, 'HK', 'Community Development Corporation Owned Firm'),
LookupType(19, 'A3', 'Labor Surplus Area Firm'),
LookupType(20, 'A5', 'Veteran Owned Business'),
LookupType(21, 'QF', 'Service Disabled Veteran Owned Business'),
LookupType(22, 'A2', 'Woman Owned Business'),
LookupType(23, '23', 'Minority Owned Business'),
LookupType(24, 'FR', 'Asian-Pacific American Owned'),
LookupType(25, 'QZ', 'Subcontinent Asian (Asian-Indian), American Owned'),
LookupType(26, 'OY', 'Black American Owned'),
LookupType(27, 'PI', 'Hispanic American Owned'),
LookupType(28, 'NB', 'Native American Owned'),
LookupType(29, 'ZZ', 'Other'),
LookupType(30, '8W', 'Woman Owned Small Business'),
LookupType(31, '27', 'Self Certified Small Disadvantaged Business'),
LookupType(32, 'JX', 'Self Certified HUBZone Joint Venture'),
LookupType(33, '8E', 'Economically Disadvantaged Women-Owned Small Business'),
LookupType(34, '8C', 'Joint Venture Women-Owned Small Business'),
LookupType(35, '8D', 'Economically Disadvantaged Joint Venture Women-Owned Small Business'),
LookupType(36, 'NG', 'Federal Agency'),
LookupType(37, 'QW', 'Federally Funded Research and Development Center'),
LookupType(38, 'C8', 'City'),
LookupType(39, 'C7', 'County'),
LookupType(40, 'ZR', 'Inter-municipal'),
LookupType(41, 'MG', 'Local Government Owned'),
LookupType(42, 'C6', 'Municipality'),
LookupType(43, 'H6', 'School District'),
LookupType(44, 'TW', 'Transit Authority'),
LookupType(45, 'UD', 'Council of Governments'),
LookupType(46, '8B', 'Housing Authorities Public/Tribal'),
LookupType(47, '86', 'Interstate Entity'),
LookupType(48, 'KM', 'Planning Commission'),
LookupType(49, 'T4', 'Port Authority'),
LookupType(50, 'H2', 'Community Development Corporation'),
LookupType(51, '6D', 'Domestic Shelter'),
LookupType(52, 'M8', 'Educational Institution'),
LookupType(53, 'G6', '1862 Land Grant College'),
LookupType(54, 'G7', '1890 Land Grant College'),
LookupType(55, 'G8', '1994 Land Grant College'),
LookupType(56, 'HB', 'Historically Black College or University'),
LookupType(57, '1A', 'Minority Institution'),
LookupType(58, '1R', 'Private University or College'),
LookupType(59, 'ZW', 'School of Forestry'),
LookupType(60, 'GW', 'Hispanic Servicing Institution'),
LookupType(61, 'OH', 'State Controlled Institution of Higher Learning'),
LookupType(62, 'HS', 'Tribal College'),
LookupType(63, 'QU', 'Veterinary College'),
LookupType(64, 'G3', 'Alaskan Native Servicing Institution'),
LookupType(65, 'G5', 'Native Hawaiian Servicing Institution'),
LookupType(66, 'BZ', 'Foundation'),
LookupType(67, '80', 'Hospital'),
LookupType(68, 'FY', 'Veterinary Hospital'),
LookupType(69, 'HQ', 'DOT Certified DBE'),
LookupType(70, '05', 'Alaskan Native Corporation Owned Firm'),
LookupType(71, 'OW', 'American Indian Owned'),
LookupType(72, 'XY', 'Indian Tribe (Federally Recognized),'),
LookupType(73, '8U', 'Native Hawaiian Organization Owned Firm'),
LookupType(74, '1B', 'Tribally Owned Firm'),
LookupType(75, 'FO', 'Township'),
LookupType(76, 'TR', 'Airport Authority'),
LookupType(77, 'G9', 'Other Than One of the Proceeding'),
LookupType(78, '2J', 'Sole Proprietorship'),
LookupType(79, '2K', 'Partnership or Limited Liability Partnership'),
LookupType(80, '2L', 'Corporate Entity (Not Tax Exempt),'),
LookupType(81, '8H', 'Corporate Entity (Tax Exempt),'),
LookupType(82, '2A', 'U.S. Government Entity'),
LookupType(83, 'X6', 'International Organization')
]
SAM_BUSINESS_TYPE_DICT = {item.name: item.desc for item in SAM_BUSINESS_TYPE}
BUSINESS_FUNDS_IND = [
LookupType(1, 'NON', 'Not Recovery Act'),
LookupType(2, 'REC', 'Recovery Act')
]
BUSINESS_FUNDS_IND_DICT = {item.name: item.desc for item in BUSINESS_FUNDS_IND}
BUSINESS_CATEGORY_FIELDS = ['airport_authority', 'alaskan_native_owned_corpo', 'alaskan_native_servicing_i',
'american_indian_owned_busi', 'asian_pacific_american_own', 'black_american_owned_busin',
'c1862_land_grant_college', 'c1890_land_grant_college', 'c1994_land_grant_college',
'c8a_program_participant', 'city_local_government', 'community_developed_corpor',
'community_development_corp', 'contracting_officers_deter', 'corporate_entity_not_tax_e',
'corporate_entity_tax_exemp', 'council_of_governments', 'county_local_government',
'domestic_or_foreign_entity', 'domestic_shelter', 'dot_certified_disadvantage',
'economically_disadvantaged', 'educational_institution', 'emerging_small_business',
'federal_agency', 'federally_funded_research', 'for_profit_organization',
'foreign_government', 'foreign_owned_and_located', 'foundation',
'hispanic_american_owned_bu', 'hispanic_servicing_institu', 'historically_black_college',
'historically_underutilized', 'hospital_flag', 'housing_authorities_public',
'indian_tribe_federally_rec', 'inter_municipal_local_gove', 'international_organization',
'interstate_entity', 'joint_venture_economically', 'joint_venture_women_owned',
'labor_surplus_area_firm', 'limited_liability_corporat', 'local_government_owned',
'manufacturer_of_goods', 'minority_institution', 'minority_owned_business',
'municipality_local_governm', 'native_american_owned_busi', 'native_hawaiian_owned_busi',
'native_hawaiian_servicing', 'nonprofit_organization', 'other_minority_owned_busin',
'other_not_for_profit_organ', 'partnership_or_limited_lia', 'planning_commission',
'port_authority', 'private_university_or_coll', 'sba_certified_8_a_joint_ve',
'school_district_local_gove', 'school_of_forestry', 'self_certified_small_disad',
'service_disabled_veteran_o', 'small_agricultural_coopera', 'small_disadvantaged_busine',
'sole_proprietorship', 'state_controlled_instituti', 'subchapter_s_corporation',
'subcontinent_asian_asian_i', 'the_ability_one_program', 'township_local_government',
'transit_authority', 'tribal_college', 'tribally_owned_business', 'us_federal_government',
'us_government_entity', 'us_local_government', 'us_state_government',
'us_tribal_government', 'veteran_owned_business', 'veterinary_college',
'veterinary_hospital', 'woman_owned_business', 'women_owned_small_business']
EXTERNAL_DATA_TYPE = [
LookupType(1, 'usps_download', 'external data load type for downloading zip files'),
LookupType(2, 'program_activity_upload', 'program activity file loaded into S3'),
LookupType(3, 'cfda', 'GSA CFDA loaded'),
LookupType(4, 'agency', 'IAE agency data loaded'),
LookupType(5, 'tas', 'FRB CARS/TAS data loaded'),
LookupType(6, 'city', 'USGS city data loaded'),
LookupType(7, 'congressional_district', 'USPS congressional district data loaded'),
LookupType(8, 'country_code', 'country code data loaded'),
LookupType(9, 'county_code', 'USGS county code data loaded'),
LookupType(10, 'recipient', 'SAM DUNS/UEI data loaded'),
LookupType(11, 'executive_compensation', 'SAM executive compensation data loaded'),
LookupType(12, 'fpds', 'FPDS data loaded'),
LookupType(13, 'gtas', 'FRB gtas/sf-133 data loaded'),
LookupType(14, 'object_class', 'OMB object class data loaded'),
LookupType(15, 'office', 'GSA office data loaded'),
LookupType(16, 'program_activity', 'OMB program activity data loaded'),
LookupType(17, 'state_code', 'state code data loaded'),
LookupType(18, 'subaward', 'FSRS subaward data loaded'),
LookupType(19, 'zip_code', 'USPS zip code data loaded'),
LookupType(20, 'defc', 'disaster emergency fund code data loaded'),
LookupType(21, 'failed_tas', 'TAS failing edits data loaded')
]
EXTERNAL_DATA_TYPE_DICT = {item.name: item.id for item in EXTERNAL_DATA_TYPE}
|
cc0-1.0
|
b0baca212b53c28d12a073bf561b8a40
| 56.506494
| 120
| 0.661698
| 3.216153
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/778cabef7323_rename_and_add_office_type_booleans_in_.py
|
1
|
1561
|
"""Rename and add office type booleans in office table
Revision ID: 778cabef7323
Revises: 7dd3f4b007e5
Create Date: 2019-05-10 10:01:26.888903
"""
# revision identifiers, used by Alembic.
revision = '778cabef7323'
down_revision = '5f29b283f23e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("ALTER TABLE office RENAME COLUMN funding_office TO contract_funding_office")
op.execute("ALTER TABLE office RENAME COLUMN contracting_office TO contract_awards_office")
op.execute("ALTER TABLE office RENAME COLUMN financial_assistance_office TO financial_assistance_awards_office")
op.add_column('office', sa.Column('financial_assistance_funding_office', sa.Boolean(), server_default='False', nullable=False))
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("ALTER TABLE office RENAME COLUMN contract_funding_office TO funding_office")
op.execute("ALTER TABLE office RENAME COLUMN contract_awards_office TO contracting_office")
op.execute("ALTER TABLE office RENAME COLUMN financial_assistance_awards_office TO financial_assistance_office")
op.drop_column('office', 'financial_assistance_funding_office')
# ### end Alembic commands ###
|
cc0-1.0
|
96cf6a454ee6ff09e084adf7d9e8b0d7
| 32.934783
| 131
| 0.736067
| 3.555809
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_fabs39_2.py
|
1
|
2847
|
from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs39_2'
def test_column_headers(database):
expected_subset = {'row_number', 'record_type', 'place_of_performance_code', 'place_of_perform_country_c',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" PrimaryPlaceOfPerformanceCode must be 00FORGN when PrimaryPlaceofPerformanceCountryCode is not USA,
not 00FORGN otherwise for record type 1 and 2.
"""
fabs_1 = FABSFactory(place_of_performance_code='00FORGN', place_of_perform_country_c='UKR', record_type=1,
correction_delete_indicatr='')
fabs_2 = FABSFactory(place_of_performance_code='00FoRGN', place_of_perform_country_c='uKr', record_type=1,
correction_delete_indicatr=None)
fabs_3 = FABSFactory(place_of_performance_code='ny**987', place_of_perform_country_c='USA', record_type=2,
correction_delete_indicatr='c')
fabs_4 = FABSFactory(place_of_performance_code='NY**987', place_of_perform_country_c='UsA', record_type=2,
correction_delete_indicatr='C')
fabs_5 = FABSFactory(place_of_performance_code='NY**987', place_of_perform_country_c='UKR', record_type=3,
correction_delete_indicatr='')
# Ignore correction delete indicator of D
fabs_6 = FABSFactory(place_of_performance_code='00FORGN', place_of_perform_country_c='USA', record_type=1,
correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4, fabs_5, fabs_6])
assert errors == 0
def test_failure(database):
""" Test failure for PrimaryPlaceOfPerformanceCode must be 00FORGN when PrimaryPlaceofPerformanceCountryCode
is not USA, not 00FORGN otherwise for record type 1 and 2.
"""
fabs_1 = FABSFactory(place_of_performance_code='00FORGN', place_of_perform_country_c='USA', record_type=1,
correction_delete_indicatr='')
fabs_2 = FABSFactory(place_of_performance_code='00FoRGN', place_of_perform_country_c='usA', record_type=1,
correction_delete_indicatr=None)
fabs_3 = FABSFactory(place_of_performance_code='ny**987', place_of_perform_country_c='UKR', record_type=2,
correction_delete_indicatr='c')
fabs_4 = FABSFactory(place_of_performance_code='NY**987', place_of_perform_country_c='ukR', record_type=2,
correction_delete_indicatr='C')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4])
assert errors == 4
|
cc0-1.0
|
b7b04a9465d6c9ce067fe3378e6458dd
| 55.94
| 112
| 0.662803
| 3.393325
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/integration/integration_test_helper.py
|
1
|
3026
|
import calendar
from datetime import datetime
from dataactcore.models.jobModels import Submission, Job
def insert_submission(sess, submission_user_id, cgac_code=None, frec_code=None, start_date=None, end_date=None,
is_quarter=False, number_of_errors=0, publish_status_id=1, is_fabs=False,
updated_at=datetime.utcnow(), test_submission=False, published_submission_ids=[],
certified=False, reporting_fiscal_year=None, reporting_fiscal_period=None):
"""Insert one submission into job tracker and get submission ID back."""
publishable = True if number_of_errors == 0 else False
if start_date is not None:
start_date = datetime.strptime(start_date, '%m/%Y')
if end_date is not None:
end_date = datetime.strptime(end_date, '%m/%Y')
end_date = datetime.strptime(
str(end_date.year) + '/'
+ str(end_date.month) + '/'
+ str(calendar.monthrange(end_date.year, end_date.month)[1]),
'%Y/%m/%d'
).date()
sub = Submission(created_at=datetime.utcnow(),
updated_at=updated_at,
user_id=submission_user_id,
cgac_code=cgac_code,
frec_code=frec_code,
reporting_start_date=start_date,
reporting_end_date=end_date,
reporting_fiscal_year=reporting_fiscal_year,
reporting_fiscal_period=reporting_fiscal_period,
is_quarter_format=is_quarter,
number_of_errors=number_of_errors,
publish_status_id=publish_status_id,
publishable=publishable,
is_fabs=is_fabs,
test_submission=test_submission,
published_submission_ids=published_submission_ids,
certified=certified)
sess.add(sub)
sess.commit()
return sub.submission_id
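# Illustrative usage (hypothetical values): a typical test call looks roughly like
#   sub_id = insert_submission(sess, submission_user_id=1, cgac_code='020',
#                              start_date='10/2023', end_date='12/2023', is_quarter=True)
# where sess is an open SQLAlchemy session supplied by the test database fixture.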
def insert_job(sess, filetype, status, type_id, submission, job_id=None, filename=None, original_filename=None,
file_size=None, num_rows=None, num_valid_rows=0, num_errors=0, updated_at=None):
"""Insert one job into job tracker and get ID back."""
if not updated_at:
updated_at = datetime.utcnow()
job = Job(
created_at=datetime.utcnow(),
updated_at=updated_at,
file_type_id=filetype,
job_status_id=status,
job_type_id=type_id,
submission_id=submission,
filename=filename,
original_filename=original_filename,
file_size=file_size,
number_of_rows=num_rows,
number_of_rows_valid=num_valid_rows,
number_of_errors=num_errors
)
if job_id:
job.job_id = job_id
sess.add(job)
sess.commit()
return job
def get_submission(sess, sub_id):
""" Get back the requested submission """
return sess.query(Submission).filter_by(submission_id=sub_id).one_or_none()
|
cc0-1.0
|
5d65528c4c2d25e7dee32e0262672798
| 39.891892
| 111
| 0.590879
| 3.796738
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/361fbffcf08b_added_named_job_dep_fks.py
|
2
|
1044
|
"""added_named_job_dep_fks
Revision ID: 361fbffcf08b
Revises: 9058e0136aba
Create Date: 2016-08-26 10:33:18.803848
"""
# revision identifiers, used by Alembic.
revision = '361fbffcf08b'
down_revision = '9058e0136aba'
branch_labels = None
depends_on = None
from alembic import op
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_foreign_key('fk_dep_job_id', 'job_dependency', 'job', ['job_id'], ['job_id'])
op.create_foreign_key('fk_prereq_job_id', 'job_dependency', 'job', ['prerequisite_id'], ['job_id'])
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('fk_prereq_job_id', 'job_dependency', type_='foreignkey')
op.drop_constraint('fk_dep_job_id', 'job_dependency', type_='foreignkey')
### end Alembic commands ###
|
cc0-1.0
|
60852055d545a13fe2d75825b7577a88
| 24.463415
| 103
| 0.66954
| 3.144578
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/scripts/populate_subaward_table.py
|
1
|
6271
|
import os
import argparse
import datetime
import logging
import json
import sys
from dataactcore.interfaces.db import GlobalDB
from dataactcore.config import CONFIG_BROKER
from dataactcore.broker_logging import configure_logging
from dataactvalidator.health_check import create_app
from dataactbroker.fsrs import GRANT, PROCUREMENT
RAW_SQL_DIR = os.path.join(CONFIG_BROKER['path'], 'dataactcore', 'scripts', 'raw_sql')
POPULATE_PROCUREMENT_SQL = os.path.join(RAW_SQL_DIR, 'populate_subaward_table_contracts.sql')
POPULATE_GRANT_SQL = os.path.join(RAW_SQL_DIR, 'populate_subaward_table_grants.sql')
LINK_PROCUREMENT_SQL = os.path.join(RAW_SQL_DIR, 'link_broken_subaward_contracts.sql')
LINK_GRANT_SQL = os.path.join(RAW_SQL_DIR, 'link_broken_subaward_grants.sql')
logger = logging.getLogger(__name__)
def extract_subaward_sql(service_type, data_change_type):
""" Gather the subaward SQL requested
Args:
service_type: type of service to ping ('procurement_service' or 'grant_service')
data_change_type: type of data change involving subawards ('populate' or 'link')
Raises:
Exception: service type is invalid
Exception: data change type is invalid
"""
pop_sql_map = {PROCUREMENT: POPULATE_PROCUREMENT_SQL, GRANT: POPULATE_GRANT_SQL}
link_sql_map = {PROCUREMENT: LINK_PROCUREMENT_SQL, GRANT: LINK_GRANT_SQL}
if service_type not in pop_sql_map:
raise Exception('Invalid service type provided: {}'.format(service_type))
type_map = {'populate': pop_sql_map, 'link': link_sql_map}
if data_change_type not in type_map:
raise Exception('Invalid data change type provided: {}'.format(data_change_type))
with open(type_map[data_change_type][service_type], 'r') as sql_file:
sql = sql_file.read()
return sql
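# Illustrative usage: callers fetch the raw SQL with, for example,
#   sql = extract_subaward_sql(GRANT, 'populate')
# and then format and execute it against the broker database, as the functions below do.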
def populate_subaward_table(sess, service_type, ids=None, min_id=None):
""" Populates the subaward table based on the IDS (or min id) provided
Args:
sess: connection to the database
service_type: type of service to ping (usually 'procurement_service' or 'grant_service')
ids: if provided, only update these ids
min_id: if provided, update all ids past this one
Raises:
Exception: ids and min_id both provided or both not provided
Exception: service type is invalid
"""
if (ids is not None and min_id is not None) or (ids is None and min_id is None):
raise Exception('ids or min_id, but not both, must be provided')
sql = extract_subaward_sql(service_type, 'populate')
if min_id is not None:
operator = '>'
values = min_id - 1
else:
operator = 'IN'
values = '({})'.format(','.join([str(id) for id in ids]))
sql = sql.format(operator, values)
# run the SQL
inserted = sess.execute(sql)
sess.commit()
inserted_count = inserted.rowcount
award_type = service_type[:service_type.index('_')]
logger.info('Inserted {} sub-{}s to the subaward table'.format(inserted_count, award_type))
return inserted_count
def fix_broken_links(sess, service_type, min_date=None):
""" Attempts to resolve any unlinked subawards given the current data
Args:
sess: connection to the database
service_type: type of service to ping (usually 'procurement_service' or 'grant_service')
Raises:
Exception: service type is invalid
"""
award_type = service_type[:service_type.index('_')]
logger.info('Attempting to fix broken sub-{} links in the subaward table'.format(award_type))
subaward_type_map = {PROCUREMENT: 'sub-contract', GRANT: 'sub-grant'}
if service_type not in subaward_type_map:
raise Exception('Invalid service type provided: {}'.format(service_type))
sql = extract_subaward_sql(service_type, 'link')
min_date_sql = '' if min_date is None else 'AND updated_at >= \'{}\''.format(min_date)
sql = sql.format(min_date_sql)
# run the SQL
updated = sess.execute(sql)
sess.commit()
updated_count = updated.rowcount
logger.info('Updated {} sub-{}s in the subaward table'.format(updated_count, award_type))
return updated_count
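# Illustrative usage (hypothetical date): a manual relink attempt might look like
#   fix_broken_links(sess, PROCUREMENT, min_date='2020-01-01')
# which limits the update to subawards whose updated_at is on or after that date.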
if __name__ == '__main__':
now = datetime.datetime.now()
configure_logging()
parser = argparse.ArgumentParser(description='Pull data from FSRS Feed')
method = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('-p', '--procurements', action='store_true', help="Load just procurement awards")
parser.add_argument('-g', '--grants', action='store_true', help="Load just grant awards")
method.add_argument('-m', '--min_id', type=int, nargs=1, help="Load all data from a minimum id (0 for complete"
" backfill)")
method.add_argument('-i', '--ids', type=int, nargs='+',
help="Single or list of FSRS ids to populate the subaward table")
with create_app().app_context():
logger.info("Begin backfilling Subaward table")
sess = GlobalDB.db().session
args = parser.parse_args()
metrics_json = {
'script_name': 'populate_subaward_table.py',
'records_inserted': 0,
'start_time': str(now)
}
service_types = []
if not (args.procurements or args.grants):
logger.error('FSRS types not provided. Please specify procurements, grants, or both.')
sys.exit(1)
if args.procurements:
service_types.append(PROCUREMENT)
if args.grants:
service_types.append(GRANT)
records_inserted = 0
for service_type in service_types:
if args.min_id:
records_inserted += populate_subaward_table(sess, service_type, min_id=args.min_id[0])
elif args.ids:
records_inserted += populate_subaward_table(sess, service_type, ids=args.ids)
metrics_json['records_inserted'] = records_inserted
metrics_json['duration'] = str(datetime.datetime.now() - now)
with open('populate_subaward_table.json', 'w+') as metrics_file:
json.dump(metrics_json, metrics_file)
|
cc0-1.0
|
39a382b66d70a56110769674eb701ef2
| 40.256579
| 115
| 0.646946
| 3.667251
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_a7_appropriations.py
|
1
|
2660
|
from dataactcore.models.stagingModels import Appropriation
from dataactcore.models.domainModels import SF133
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'a7_appropriations'
_TAS = 'a7_appropriations_tas'
def test_column_headers(database):
expected_subset = {'uniqueid_TAS', 'row_number', 'budget_authority_unobligat_fyb',
'expected_value_GTAS SF133 Line 1000', 'difference'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Tests that SF 133 amount for line 1000 matches Appropriation budget_authority_unobligat_fyb for the specified
fiscal year and period
"""
tas_1 = 'tas_one_line_1'
tas_2 = 'tas_one_line_2'
sf_1 = SF133(line=1000, tas=tas_1, period=1, fiscal_year=2016, amount=1, agency_identifier='sys',
main_account_code='000', sub_account_code='000')
sf_2 = SF133(line=1000, tas=tas_2, period=1, fiscal_year=2016, amount=0, agency_identifier='sys',
main_account_code='000', sub_account_code='000')
ap_1 = Appropriation(job_id=1, row_number=1, tas=tas_1, budget_authority_unobligat_fyb=1)
ap_2 = Appropriation(job_id=2, row_number=1, tas=tas_2, budget_authority_unobligat_fyb=None)
assert number_of_errors(_FILE, database, models=[sf_1, sf_2, ap_1, ap_2]) == 0
# Test with split SF133 lines
tas = 'tas_two_lines'
sf_1 = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys',
main_account_code='000', sub_account_code='000', disaster_emergency_fund_code='n')
sf_2 = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=4, agency_identifier='sys',
main_account_code='000', sub_account_code='000', disaster_emergency_fund_code='o')
ap = Appropriation(job_id=1, row_number=1, tas=tas, budget_authority_unobligat_fyb=5)
assert number_of_errors(_FILE, database, models=[sf_1, sf_2, ap]) == 0
def test_failure(database):
""" Tests that SF 133 amount for line 1000 does not match Appropriation budget_authority_unobligat_fyb for the
specified fiscal year and period
"""
tas = 'fail_tas'
sf = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys',
main_account_code='000', sub_account_code='000')
ap_1 = Appropriation(job_id=1, row_number=1, tas=tas, budget_authority_unobligat_fyb=0)
ap_2 = Appropriation(job_id=2, row_number=1, tas=tas, budget_authority_unobligat_fyb=None)
assert number_of_errors(_FILE, database, models=[sf, ap_1, ap_2]) == 2
|
cc0-1.0
|
32ab0dee0ec6eda2091d97880839c5d0
| 46.5
| 117
| 0.67594
| 2.96544
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/da2e50d423ff_create_frec_table.py
|
1
|
1237
|
"""create FREC table
Revision ID: da2e50d423ff
Revises: aa10ae595d3e
Create Date: 2017-07-06 10:27:04.738865
"""
# revision identifiers, used by Alembic.
revision = 'da2e50d423ff'
down_revision = 'aa10ae595d3e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('frec',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('frec_id', sa.Integer(), nullable=False),
sa.Column('frec_code', sa.Text(), nullable=True),
sa.Column('agency_name', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('frec_id')
)
op.create_index(op.f('ix_frec_frec_code'), 'frec', ['frec_code'], unique=True)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_frec_frec_code'), table_name='frec')
op.drop_table('frec')
### end Alembic commands ###
|
cc0-1.0
|
a4daa93e171e0f4f8860beff155c9bab
| 24.244898
| 82
| 0.665319
| 3.115869
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/449ab366f333_adding_fiscal_year_and_period_to_submission.py
|
2
|
1089
|
"""adding fiscal year and period to submission
Revision ID: 449ab366f333
Revises: a0a4f1ef56ae
Create Date: 2016-08-11 13:21:49.526346
"""
# revision identifiers, used by Alembic.
revision = '449ab366f333'
down_revision = '5a9051f9bfc5'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('submission', sa.Column('reporting_fiscal_period', sa.Integer(), server_default='0', nullable=False))
op.add_column('submission', sa.Column('reporting_fiscal_year', sa.Integer(), server_default='0', nullable=False))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('submission', 'reporting_fiscal_year')
op.drop_column('submission', 'reporting_fiscal_period')
### end Alembic commands ###
|
cc0-1.0
|
023d3341884b4a6b4a6c47bb9449cdfe
| 24.928571
| 119
| 0.699725
| 3.468153
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/52b3c1a122ce_add_certified_files_history_table_and_update_certify_history_table.py
|
1
|
2380
|
"""Add certified_files_history table and update certify_history table
Revision ID: 52b3c1a122ce
Revises: 204e2cf584cd
Create Date: 2017-06-05 12:37:10.724212
"""
# revision identifiers, used by Alembic.
revision = '52b3c1a122ce'
down_revision = '204e2cf584cd'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('certified_files_history',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('certified_files_history_id', sa.Integer(), nullable=False),
sa.Column('certify_history_id', sa.Integer(), nullable=True),
sa.Column('submission_id', sa.Integer(), nullable=True),
sa.Column('filename', sa.Text(), nullable=True),
sa.Column('file_type_id', sa.Integer(), nullable=True),
sa.Column('warning_filename', sa.Text(), nullable=True),
sa.Column('narrative', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['certify_history_id'], ['certify_history.certify_history_id'], name='fk_certify_history_certified_files_id'),
sa.ForeignKeyConstraint(['file_type_id'], ['file_type.file_type_id'], name='fk_certified_files_history_file_type'),
sa.ForeignKeyConstraint(['submission_id'], ['submission.submission_id'], name='fk_certified_files_history_submission_id'),
sa.PrimaryKeyConstraint('certified_files_history_id')
)
op.drop_constraint('fk_certify_history_submission_id', 'certify_history', type_='foreignkey')
op.create_foreign_key('fk_certify_history_submission_id', 'certify_history', 'submission', ['submission_id'], ['submission_id'])
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('fk_certify_history_submission_id', 'certify_history', type_='foreignkey')
op.create_foreign_key('fk_certify_history_submission_id', 'certify_history', 'submission', ['submission_id'], ['submission_id'], ondelete='CASCADE')
op.drop_table('certified_files_history')
### end Alembic commands ###
|
cc0-1.0
|
1c1ffac479b969928ad335e2cf7edf8f
| 40.034483
| 152
| 0.687395
| 3.505155
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactvalidator/scripts/load_submission_window_schedule.py
|
1
|
2825
|
import os
import logging
import boto3
import pandas as pd
from datetime import datetime, timedelta
from dataactcore.broker_logging import configure_logging
from dataactcore.interfaces.db import GlobalDB
from dataactcore.config import CONFIG_BROKER
from dataactcore.models.jobModels import SubmissionWindowSchedule
from dataactvalidator.health_check import create_app
from dataactvalidator.scripts.loader_utils import clean_data, insert_dataframe
logger = logging.getLogger(__name__)
def add_day(row, col):
""" Adds 1 day to whatever date is provided (used for adding a day to the deadlines)
Args:
row: the row to update
col: the name of the date column to add a day to
"""
new_date = datetime.strptime(row[col], '%m/%d/%y') + timedelta(days=1)
return new_date.strftime('%m/%d/%y')
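# Illustrative example: add_day accepts any mapping holding a '%m/%d/%y' date string under
# the given key; the row below is made up purely for illustration.
assert add_day({'publish_deadline': '12/31/20'}, 'publish_deadline') == '01/01/21'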
def load_submission_window_schedule():
""" Loads the submission window schedule data. """
if CONFIG_BROKER["use_aws"]:
s3_client = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
sub_schedule_file = s3_client.generate_presigned_url('get_object', {'Bucket': CONFIG_BROKER['sf_133_bucket'],
'Key': "submission_window_schedule.csv"},
ExpiresIn=600)
else:
sub_schedule_file = os.path.join(CONFIG_BROKER['path'], 'dataactvalidator', 'config',
'submission_window_schedule.csv')
logger.info('Loading submission window schedule data')
with create_app().app_context():
data = pd.read_csv(sub_schedule_file, dtype=str)
data = clean_data(
data,
SubmissionWindowSchedule,
{
'year': 'year',
'period': 'period',
'period_start': 'period_start',
'publish_deadline': 'publish_deadline',
'certification_deadline': 'certification_deadline'
},
{}
)
# Add a day to the deadlines because the dates in the file are supposed to be inclusive
data['publish_deadline'] = data.apply(lambda x: add_day(x, 'publish_deadline'), axis=1)
data['certification_deadline'] = data.apply(lambda x: add_day(x, 'certification_deadline'), axis=1)
sess = GlobalDB.db().session
# delete any data in the SubmissionWindowSchedule table
sess.query(SubmissionWindowSchedule).delete()
# insert data into table
num = insert_dataframe(data, SubmissionWindowSchedule.__table__.name, sess.connection())
logger.info('{} records inserted to submission_window_schedule'.format(num))
sess.commit()
if __name__ == '__main__':
configure_logging()
load_submission_window_schedule()
|
cc0-1.0
|
e532656ec0fe5c04b71a9c1a89bd4dc9
| 37.175676
| 117
| 0.614513
| 4.197623
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/5dbc09749ce0_published_fabs_index_on_true.py
|
1
|
1029
|
"""published fabs index on true
Revision ID: 5dbc09749ce0
Revises: 605bcaf99c01
Create Date: 2018-01-09 10:04:40.991666
"""
# revision identifiers, used by Alembic.
revision = '5dbc09749ce0'
down_revision = '605bcaf99c01'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_published_award_financial_assistance_is_active', 'published_award_financial_assistance', ['is_active'], unique=False, postgresql_where=sa.text('is_active IS true'))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_published_award_financial_assistance_is_active', table_name='published_award_financial_assistance')
### end Alembic commands ###
|
cc0-1.0
|
baed6f8d49ea2f2ec8d680b0d7c890b5
| 24.725
| 188
| 0.713314
| 3.319355
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactvalidator/app.py
|
1
|
17035
|
import logging
import csv
import ddtrace
import inspect
import time
import traceback
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.ext import SpanTypes
from flask import Flask, g, current_app
from dataactcore.aws.sqsHandler import sqs_queue
from dataactcore.config import CONFIG_BROKER, CONFIG_SERVICES
from dataactcore.interfaces.db import GlobalDB
from dataactcore.interfaces.function_bag import mark_job_status, write_file_error
from dataactcore.broker_logging import configure_logging
from dataactcore.models.jobModels import Job, FileGeneration
from dataactcore.models.lookups import JOB_STATUS_DICT
from dataactcore.utils.responseException import ResponseException
from dataactcore.utils.statusCode import StatusCode
from dataactcore.utils.tracing import DatadogEagerlyDropTraceFilter, SubprocessTrace
from dataactvalidator.sqs_work_dispatcher import SQSWorkDispatcher
from dataactvalidator.validation_handlers.file_generation_manager import FileGenerationManager
from dataactvalidator.validation_handlers.validationError import ValidationError
from dataactvalidator.validation_handlers.validationManager import ValidationManager
from dataactvalidator.validator_logging import log_job_message
logger = logging.getLogger(__name__)
# Replace the param below with enabled=True during environment deploys to turn tracing on
ddtrace.tracer.configure(enabled=False)
if ddtrace.tracer.enabled:
ddtrace.config.flask["service_name"] = "validator"
ddtrace.config.flask["analytics_enabled"] = True # capture APM "Traces" & "Analyzed Spans" in App Analytics
ddtrace.config.flask["analytics_sample_rate"] = 1.0 # Including 100% of traces in sample
# Distributed tracing only needed if picking up disjoint traces by HTTP Header value
ddtrace.config.django["distributed_tracing_enabled"] = False
# patch_all() captures traces from integrated components' libraries by patching them. See:
# - http://pypi.datadoghq.com/trace/docs/advanced_usage.html#patch-all
# - Integrated Libs: http://pypi.datadoghq.com/trace/docs/index.html#supported-libraries
ddtrace.patch_all()
READY_STATUSES = [JOB_STATUS_DICT['waiting'], JOB_STATUS_DICT['ready']]
RUNNING_STATUSES = READY_STATUSES + [JOB_STATUS_DICT['running']]
JOB_TYPE = "Validator"
def create_app():
return Flask(__name__)
def run_app():
""" Run the application. """
app = create_app()
with app.app_context():
current_app.debug = CONFIG_SERVICES['debug']
local = CONFIG_BROKER['local']
g.is_local = local
current_app.config.from_object(__name__)
# Future: Override config w/ environment variable, if set
current_app.config.from_envvar('VALIDATOR_SETTINGS', silent=True)
queue = sqs_queue()
logger.info("Starting SQS polling")
keep_polling = True
while keep_polling:
# Start a Datadog Trace for this poll iter to capture activity in APM
with ddtrace.tracer.trace(
name=f"job.{JOB_TYPE}", service=JOB_TYPE.lower(), resource=queue.url, span_type=SpanTypes.WORKER
) as span:
# Set True to add trace to App Analytics:
# - https://docs.datadoghq.com/tracing/app_analytics/?tab=python#custom-instrumentation
span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, 1.0)
                # Cleanup handling is engaged, so message retries are allowed
dispatcher = SQSWorkDispatcher(queue, worker_can_start_child_processes=True)
def choose_job_by_message_attributes(message):
# Determine if this is a retry of this message, in which case job execution should know so it can
# do cleanup before proceeding with the job
q_msg_attr = message.attributes # the non-user-defined (queue-defined) attributes on the message
is_retry = False
if q_msg_attr.get('ApproximateReceiveCount') is not None:
is_retry = int(q_msg_attr.get('ApproximateReceiveCount')) > 1
msg_attr = message.message_attributes
if msg_attr and msg_attr.get('validation_type', {}).get('StringValue') == 'generation':
# Generating a file
job_signature = {"_job": validator_process_file_generation,
"file_gen_id": message.body,
"is_retry": is_retry}
else:
# Running validations (or generating a file from a Job)
a_agency_code = msg_attr.get('agency_code', {}).get('StringValue') if msg_attr else None
job_signature = {"_job": validator_process_job,
"job_id": message.body,
"agency_code": a_agency_code,
"is_retry": is_retry}
return job_signature
found_message = dispatcher.dispatch_by_message_attribute(choose_job_by_message_attributes)
if not found_message:
                    # Flag the Datadog trace for dropping, since no trace-worthy activity happened on this poll
DatadogEagerlyDropTraceFilter.drop(span)
# When you receive an empty response from the queue, wait before trying again
time.sleep(1)
# If this process is exiting, don't poll for more work
keep_polling = not dispatcher.is_exiting
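# Illustrative sketch (added; not part of the original module): how a producer might enqueue work
# that the poll loop above dispatches. The message shape mirrors what
# choose_job_by_message_attributes reads; it assumes sqs_queue() returns a boto3 SQS Queue
# resource, as its use above suggests. The helper name is hypothetical and nothing calls it here.
def _example_enqueue_file_generation(file_generation_id):
    """ Hypothetical helper: the body carries the FileGeneration ID, and a validation_type
        attribute of 'generation' routes the message to validator_process_file_generation. """
    queue = sqs_queue()
    queue.send_message(
        MessageBody=str(file_generation_id),
        MessageAttributes={'validation_type': {'DataType': 'String', 'StringValue': 'generation'}}
    )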
def validator_process_file_generation(file_gen_id, is_retry=False):
""" Retrieves a FileGeneration object based on its ID, and kicks off a file generation. Handles errors by ensuring
the FileGeneration (if exists) is no longer cached.
Args:
file_gen_id: ID of a FileGeneration object
            is_retry: Whether this is a retry rather than the first attempt at handling this job. If True, cleanup
                is performed before proceeding with the retry
Raises:
Any Exceptions raised by the FileGenerationManager
"""
# Add args name and values as span tags on this trace
tag_data = locals()
with SubprocessTrace(
name=f"job.{JOB_TYPE}.file_generation",
service=JOB_TYPE.lower(),
span_type=SpanTypes.WORKER,
) as span:
file_gen_data = {}
span.set_tags(tag_data)
if is_retry:
if cleanup_generation(file_gen_id):
log_job_message(
logger=logger,
message="Attempting a retry of {} after successful retry-cleanup.".format(inspect.stack()[0][3]),
job_type=JOB_TYPE,
job_id=file_gen_id,
is_debug=True
)
else:
log_job_message(
logger=logger,
message="Retry of {} found to be not necessary after cleanup. "
"Returning from job with success.".format(inspect.stack()[0][3]),
job_type=JOB_TYPE,
job_id=file_gen_id,
is_debug=True
)
return
sess = GlobalDB.db().session
file_generation = None
try:
file_generation = sess.query(FileGeneration).filter_by(file_generation_id=file_gen_id).one_or_none()
if file_generation:
file_gen_data = {
'agency_code': file_generation.agency_code,
'agency_type': file_generation.agency_type,
'start_date': file_generation.start_date,
'end_date': file_generation.end_date,
'file_type': file_generation.file_type,
'file_path': file_generation.file_path,
}
span.resource = f"file_generation/{file_generation.file_type}"
span.set_tags(file_gen_data)
elif file_generation is None:
raise ResponseException('FileGeneration ID {} not found in database'.format(file_gen_id),
StatusCode.CLIENT_ERROR, None)
file_generation_manager = FileGenerationManager(sess, g.is_local, file_generation=file_generation)
file_generation_manager.generate_file()
except Exception as e:
# Log uncaught exceptions and fail all Jobs referencing this FileGeneration
error_data = {
'message': 'An unhandled exception occurred in the Validator during file generation',
'message_type': 'ValidatorInfo',
'file_generation_id': file_gen_id,
'traceback': traceback.format_exc()
}
if file_generation:
error_data.update(file_gen_data)
logger.error(error_data)
# Try to mark the Jobs as failed, but continue raising the original Exception if not possible
try:
if file_generation:
# Uncache the FileGeneration
sess.refresh(file_generation)
file_generation.is_cached_file = False
# Mark all Jobs waiting on this FileGeneration as failed
generation_jobs = sess.query(Job).filter_by(file_generation_id=file_gen_id).all()
for job in generation_jobs:
if job.job_status in RUNNING_STATUSES:
mark_job_status(job.job_id, 'failed')
sess.refresh(job)
job.file_generation_id = None
job.error_message = str(e)
sess.commit()
except Exception:
pass
# ResponseExceptions only occur at very specific times, and should not affect the Validator's
# future attempts at handling messages from SQS
if not isinstance(e, ResponseException):
raise e
def validator_process_job(job_id, agency_code, is_retry=False):
""" Retrieves a Job based on its ID, and kicks off a validation. Handles errors by ensuring the Job (if exists) is
no longer running.
Args:
job_id: ID of a Job
agency_code: CGAC or FREC code for agency, only required for file generations by Job
            is_retry: Whether this is a retry rather than the first attempt at handling this job. If True, cleanup
                is performed before proceeding with the retry
Raises:
Any Exceptions raised by the GenerationManager or ValidationManager, excluding those explicitly handled
"""
# Add args name and values as span tags on this trace
tag_data = locals()
with SubprocessTrace(
name=f"job.{JOB_TYPE}.validation",
service=JOB_TYPE.lower(),
span_type=SpanTypes.WORKER,
) as span:
job_data = {}
span.set_tags(tag_data)
if is_retry:
if cleanup_validation(job_id):
log_job_message(
logger=logger,
message="Attempting a retry of {} after successful retry-cleanup.".format(inspect.stack()[0][3]),
job_type=JOB_TYPE,
job_id=job_id,
is_debug=True
)
else:
log_job_message(
logger=logger,
message="Retry of {} found to be not necessary after cleanup. "
"Returning from job with success.".format(inspect.stack()[0][3]),
job_type=JOB_TYPE,
job_id=job_id,
is_debug=True
)
return
sess = GlobalDB.db().session
job = None
try:
# Get the job
job = sess.query(Job).filter_by(job_id=job_id).one_or_none()
if job:
job_data = {
'submission_id': job.submission_id,
'job_type': job.job_type.name,
'file_type': job.file_type.name if job.file_type else None,
}
span.resource = job.job_type.name + (f"/{job.file_type.name}" if job.file_type else "")
span.set_tags(job_data)
elif job is None:
validation_error_type = ValidationError.jobError
write_file_error(job_id, None, validation_error_type)
raise ResponseException('Job ID {} not found in database'.format(job_id),
StatusCode.CLIENT_ERROR, None, validation_error_type)
mark_job_status(job_id, 'ready')
            # Based on the job type, we either generate a file or run validations
if job.job_type.name == 'file_upload':
# Generate A, E, or F file
file_generation_manager = FileGenerationManager(sess, g.is_local, job=job)
file_generation_manager.generate_file(agency_code)
else:
# Run validations
validation_manager = ValidationManager(g.is_local, CONFIG_SERVICES['error_report_path'])
validation_manager.validate_job(job.job_id)
except (ResponseException, csv.Error, UnicodeDecodeError, ValueError) as e:
# Handle exceptions explicitly raised during validation
error_data = {
'message': 'An exception occurred in the Validator',
'message_type': 'ValidatorInfo',
'job_id': job_id,
'traceback': traceback.format_exc()
}
if job:
error_data.update(job_data)
logger.error(error_data)
sess.refresh(job)
job.error_message = str(e)
if job.filename is not None:
error_type = ValidationError.unknownError
extra_info = None
if isinstance(e, UnicodeDecodeError):
error_type = ValidationError.encodingError
elif isinstance(e, ResponseException):
error_type = e.errorType
extra_info = e.extraInfo
write_file_error(job.job_id, job.filename, error_type, extra_info=extra_info)
mark_job_status(job.job_id, 'invalid')
else:
logger.error(error_data)
raise e
except Exception as e:
# Log uncaught exceptions and fail the Job
error_data = {
'message': 'An unhandled exception occurred in the Validator',
'message_type': 'ValidatorInfo',
'job_id': job_id,
'traceback': traceback.format_exc()
}
if job:
error_data.update(job_data)
logger.error(error_data)
# Try to mark the Job as failed, but continue raising the original Exception if not possible
try:
mark_job_status(job_id, 'failed')
sess.refresh(job)
job.error_message = str(e)
sess.commit()
except Exception:
pass
raise e
def cleanup_generation(file_gen_id):
""" Cleans up generation task if to be reused
Args:
file_gen_id: file generation id
Returns:
boolean whether or not it needs to be run again
"""
sess = GlobalDB.db().session
retry = False
gen = sess.query(FileGeneration).filter(FileGeneration.file_generation_id == file_gen_id).one_or_none()
if gen and not gen.file_path:
retry = True
elif gen:
running_jobs = sess.query(Job).filter(Job.file_generation_id == file_gen_id,
Job.job_status_id.in_(RUNNING_STATUSES))
retry = (running_jobs.count() > 0)
if retry:
gen.file_path = None
gen.is_cached_file = False
sess.commit()
return retry
def cleanup_validation(job_id):
""" Cleans up validation task if to be reused
Args:
job_id: ID of a Job
Returns:
boolean whether or not it needs to be run again
"""
sess = GlobalDB.db().session
retry = False
job = sess.query(Job).filter(Job.job_id == job_id).one_or_none()
if job and job.job_status_id in RUNNING_STATUSES:
if job.job_status_id not in READY_STATUSES:
job.job_status_id = JOB_STATUS_DICT['waiting']
sess.commit()
retry = True
return retry
if __name__ == "__main__":
configure_logging()
    # Configure the tracer to drop queue-poll traces that have been flagged as uninteresting
DatadogEagerlyDropTraceFilter.activate()
run_app()
|
cc0-1.0
|
1d1102fa9081c6e5fb32f91d637414df
| 41.5875
| 118
| 0.575991
| 4.405224
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/1fabe0bdd48c_adding_parent_duns_number_and_name_to_.py
|
1
|
1579
|
"""Adding parent duns number and name to (Published)AwardFinancialAssistance
Revision ID: 1fabe0bdd48c
Revises: 6973101b6853
Create Date: 2018-03-27 15:07:45.721751
"""
# revision identifiers, used by Alembic.
revision = '1fabe0bdd48c'
down_revision = '6973101b6853'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('award_financial_assistance', sa.Column('ultimate_parent_legal_enti', sa.Text(), nullable=True))
op.add_column('award_financial_assistance', sa.Column('ultimate_parent_unique_ide', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('ultimate_parent_legal_enti', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('ultimate_parent_unique_ide', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('published_award_financial_assistance', 'ultimate_parent_unique_ide')
op.drop_column('published_award_financial_assistance', 'ultimate_parent_legal_enti')
op.drop_column('award_financial_assistance', 'ultimate_parent_unique_ide')
op.drop_column('award_financial_assistance', 'ultimate_parent_legal_enti')
### end Alembic commands ###
|
cc0-1.0
|
5131f69ce80dbd3193c6675132151ea6
| 33.326087
| 124
| 0.723876
| 3.215886
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactbroker/decorators.py
|
1
|
2184
|
from flask import g
from functools import wraps
from webargs import fields as webargs_fields
from webargs.flaskparser import parser as webargs_parser
from dataactbroker.handlers.agency_handler import get_sub_tiers_from_perms
from dataactbroker.permissions import requires_login, separate_affiliations
from dataactcore.utils.responseException import ResponseException
from dataactcore.utils.statusCode import StatusCode
def convert_to_submission_id(fn):
""" Decorator which reads the request, looking for a submission key to convert into a submission_id parameter. The
provided function should have a submission_id parameter as its first argument.
Returns:
The submission ID that was found
Raises:
ResponseException: If a submission_id or submission parameter is not found
"""
@wraps(fn)
@requires_login # check login before checking submission_id
def wrapped(*args, **kwargs):
req_args = webargs_parser.parse({
'submission': webargs_fields.Int(),
'submission_id': webargs_fields.Int()
})
submission_id = req_args.get('submission', req_args.get('submission_id'))
if submission_id is None:
raise ResponseException("submission_id is required", StatusCode.CLIENT_ERROR)
return fn(submission_id, *args, **kwargs)
return wrapped
def get_fabs_sub_tier_agencies(fn):
""" Decorator which provides a list of all SubTierAgencies the user has FABS permissions for. The function should
have a sub_tier_agencies parameter as its first argument. """
@wraps(fn)
def wrapped(*args, **kwargs):
sub_tier_agencies = []
if g.user is not None:
# create list of affiliations
cgac_ids, frec_ids = separate_affiliations(g.user.affiliations, 'fabs')
# generate SubTierAgencies based on FABS permissions
all_cgac_sub_tiers, all_frec_sub_tiers = get_sub_tiers_from_perms(g.user.website_admin, cgac_ids, frec_ids)
sub_tier_agencies = all_cgac_sub_tiers.all() + all_frec_sub_tiers.all()
return fn(sub_tier_agencies, *args, **kwargs)
return wrapped
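# Illustrative sketch (added; not part of the original module): how a handler might apply
# convert_to_submission_id. The handler below is hypothetical; when invoked inside a Flask request
# context, the decorator checks login, parses 'submission' or 'submission_id' from the request,
# and passes the ID as the first positional argument.
@convert_to_submission_id
def _example_submission_handler(submission_id):
    """ Hypothetical handler demonstrating the decorator contract. """
    return {'submission_id': submission_id}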
|
cc0-1.0
|
a4ced95bdca7f7b418976c3c6139f282
| 41
| 119
| 0.692766
| 3.886121
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_c9_award_financial.py
|
1
|
11665
|
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory
from tests.unit.dataactcore.factories.staging import AwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'c9_award_financial'
_TAS = 'c9_award_financial_tas'
def test_column_headers(database):
expected_subset = {'source_row_number', 'source_value_fain', 'source_value_uri',
'source_value_federal_action_obligation', 'difference', 'uniqueid_FAIN', 'uniqueid_URI'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_equal_fain(database):
""" Tests that File D2 (award financial assistance) fain matches File C (award financial) fain. """
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain='aBc', uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='2')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=afa.fain.lower(), uri=None,
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 0
def test_equal_uri(database):
""" Tests that File D2 (award financial assistance) uri matches File C (award financial) uri. """
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain=None, uri='xYz', federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='1')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=None, uri=afa.uri.lower(),
transaction_obligated_amou=0)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 0
def test_null_uri_fain(database):
""" Tests File D2 (award financial assistance) and File C (award financial)
having NULL values for both fain and uri.
"""
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain=None, uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='1')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain='abc', uri='def',
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 0
def test_both_fain_and_url_supplied(database):
""" Tests File D2 (award financial assistance) having both uri and fain populated. """
tas = _TAS
afa_1 = AwardFinancialAssistanceFactory(tas=tas, fain='aBc', uri='xYz', federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='2')
afa_2 = AwardFinancialAssistanceFactory(tas=tas, fain='dEf', uri='gHi', federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='1')
af_1 = AwardFinancialFactory(tas=tas, submisson_id=afa_1.submission_id, fain=afa_1.fain.lower(), uri=None,
transaction_obligated_amou=1)
af_2 = AwardFinancialFactory(tas=tas, submisson_id=afa_2.submission_id, fain=None, uri=afa_2.uri.lower(),
transaction_obligated_amou=0)
errors = number_of_errors(_FILE, database, models=[afa_1, afa_2, af_1, af_2])
assert errors == 0
def test_unequal_fain(database):
""" Tests File D2 (award financial assistance) fain different than File C (award financial) fain. """
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain='abc', uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='3')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain='xyz', uri=None,
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 1
def test_unequal_uri(database):
""" Tests File D2 (award financial assistance) uri different than File C (award financial) uri. """
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain=None, uri='abc', federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='1')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=None, uri='xyz',
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 1
def test_unequal_fain_null(database):
""" Tests non-NULL File D2 (award financial assistance) fain compared to NULL fain in File C (award financial). """
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain='abc', uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='2')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=None, uri=None,
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 1
def test_unequal_fain_aggregate(database):
""" Tests File D2 (award financial assistance) uri different than File C (award financial) non-aggregate. """
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain='abc', uri='xyz', federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='2')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain='abc', uri='abc',
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 0
def test_unequal_uri_non_aggregate(database):
""" Tests File D2 (award financial assistance) fain different than File C (award financial) aggregate. """
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain='abc', uri='xyz', federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='1')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain='xyz', uri='xyz',
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 0
def test_unequal_uri_null(database):
""" Tests NULL File D2 (award financial assistance) uri compared to a non-NULL uri in File C (award financial). """
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain=None, uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='1')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=None, uri='abc',
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 0
def test_zero_federal_action_obligation_and_original_loan_subsidy_cost(database):
""" Tests that a single warning is thrown for both a federal action obligation of 0 and an original loan subsidy
cost of 0.
"""
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain='abc', uri=None, federal_action_obligation=0,
original_loan_subsidy_cost='0', record_type='3')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=None, uri=None,
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 0
def test_ignored_and_failed_federal_action_obligation_values(database):
""" Tests that a single warning is thrown for both a federal action obligation of 0 and an original loan subsidy
cost of 0.
"""
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain='abc', uri=None, federal_action_obligation=0,
original_loan_subsidy_cost='1', assistance_type='08', record_type='2')
afa_2 = AwardFinancialAssistanceFactory(tas=tas, fain='aBc', uri=None, federal_action_obligation=2,
original_loan_subsidy_cost='1', assistance_type='09', record_type='3')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=None, uri=None,
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af, afa_2])
assert errors == 2
# Test that this is ignored if assistance type is 09
afa = AwardFinancialAssistanceFactory(tas=tas, fain='abc', uri=None, federal_action_obligation=0,
original_loan_subsidy_cost='1', assistance_type='09', record_type='2')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=None, uri=None,
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 0
def test_ignored_and_failed_original_loan_subsidy_cost_values(database):
""" Tests that a single warning is thrown for both a federal action obligation of 0 and an original loan subsidy
cost of 0.
"""
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain='abc', uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='0', assistance_type='09', record_type='3')
afa_2 = AwardFinancialAssistanceFactory(tas=tas, fain='aBc', uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='-2.3', assistance_type='09', record_type='2')
afa_3 = AwardFinancialAssistanceFactory(tas=tas, fain='abC', uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='2.3', assistance_type='08', record_type='3')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=None, uri=None,
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af, afa_2, afa_3])
assert errors == 3
# Test that this is ignored if assistance type is 08
afa = AwardFinancialAssistanceFactory(tas=tas, fain='abc', uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='0', assistance_type='08', record_type='2')
afa_2 = AwardFinancialAssistanceFactory(tas=tas, fain='aBc', uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='-2.3', assistance_type='08', record_type='3')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=None, uri=None,
transaction_obligated_amou=1)
errors = number_of_errors(_FILE, database, models=[afa, af, afa_2])
assert errors == 0
def test_null_toa(database):
""" Tests that null TOA is ignored even though everything else matches. """
tas = _TAS
afa = AwardFinancialAssistanceFactory(tas=tas, fain='aBc', uri=None, federal_action_obligation=1,
original_loan_subsidy_cost='1', record_type='2')
af = AwardFinancialFactory(tas=tas, submisson_id=afa.submission_id, fain=afa.fain.lower(), uri=None,
transaction_obligated_amou=None)
errors = number_of_errors(_FILE, database, models=[afa, af])
assert errors == 1
|
cc0-1.0
|
ed40010346de9e3841390b5e5e4a0a4b
| 51.545045
| 119
| 0.629061
| 3.268423
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/8ce3d6d070da_add_session_table.py
|
1
|
1155
|
"""add_session_table
Revision ID: 8ce3d6d070da
Revises: 885280875a1c
Create Date: 2016-10-27 20:04:26.845536
"""
# revision identifiers, used by Alembic.
revision = '8ce3d6d070da'
down_revision = '885280875a1c'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('session_map',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('session_id', sa.Integer(), nullable=False),
sa.Column('uid', sa.Text(), nullable=True),
sa.Column('data', sa.Text(), nullable=True),
sa.Column('expiration', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('session_id')
)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('session_map')
### end Alembic commands ###
|
cc0-1.0
|
8126c6a37fb212dd7f016aa2bfbcf826
| 23.0625
| 63
| 0.671861
| 3.33815
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_fabs4_2.py
|
1
|
1489
|
from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs4_2'
def test_column_headers(database):
expected_subset = {'row_number', 'action_date', 'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Tests if value of action date is between 19991001 and 20991231 (a date between 10/01/1999 and 12/31/2099). """
fabs_1 = FABSFactory(action_date='20120725', correction_delete_indicatr='')
fabs_2 = FABSFactory(action_date=None, correction_delete_indicatr='C')
# Ignore if not a valid date, different rule covers this
fabs_3 = FABSFactory(action_date='5', correction_delete_indicatr=None)
# Ignore correction delete indicator of D
fabs_4 = FABSFactory(action_date='19990131', correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4])
assert errors == 0
def test_failure(database):
""" Tests if value of action date is not between 19991001 and 20991231 (i.e., a date between 10/01/1999 and
12/31/2099).
"""
fabs_1 = FABSFactory(action_date='19990131', correction_delete_indicatr='c')
fabs_2 = FABSFactory(action_date='21000101', correction_delete_indicatr=None)
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2])
assert errors == 2
|
cc0-1.0
|
35eac370bfc8b303496761980b0575e1
| 42.794118
| 118
| 0.714574
| 3.272527
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/scripts/fix_county_data.py
|
1
|
14033
|
import logging
import argparse
from dataactcore.interfaces.db import GlobalDB
from dataactcore.broker_logging import configure_logging
from dataactvalidator.health_check import create_app
logger = logging.getLogger(__name__)
def create_matviews(sess):
logger.info("Creating zip_county temporary view")
# zip_county view creation
sess.execute(
"""CREATE MATERIALIZED VIEW zip_county AS (
SELECT
concat(zip5, zip_last4) AS combined_zip,
concat(zip5, '-', zip_last4) AS dashed_zip,
zip5,
zip_last4,
zips.county_number,
county_name
FROM zips
LEFT OUTER JOIN county_code AS cc
ON cc.state_code = zips.state_abbreviation
AND cc.county_number = zips.county_number)"""
)
sess.commit()
logger.info("Created zip_county temporary view, creating zip_county index on zip5")
sess.execute("CREATE INDEX ix_zip5_zip_county ON zip_county (zip5)")
logger.info("Created zip_county index on zip5, creating zip_county index on zip_last4")
sess.execute("CREATE INDEX ix_zip_last4_zip_county ON zip_county (zip_last4)")
logger.info("Created zip_county index on zip_last4, creating zip_county index on combined_zip")
sess.execute("CREATE INDEX ix_combined_zip_zip_county ON zip_county (combined_zip)")
logger.info("Created zip_county index on combined_zip, creating zip_county index on dashed_zip")
sess.execute("CREATE INDEX ix_dashed_zip_zip_county ON zip_county (dashed_zip)")
sess.commit()
logger.info("Created zip_county indexes, creating single_county temporary view")
# single_county view creation
sess.execute(
"""CREATE MATERIALIZED VIEW single_county AS (
SELECT zip5, county_number, county_name
FROM (SELECT
zip5,
county_number,
county_name,
ROW_NUMBER() OVER (PARTITION BY
zip5) AS row
FROM zip_county) AS tmp
WHERE tmp.row = 1)"""
)
sess.commit()
logger.info("Created single_county temporary view, creating single_county index on zip5")
sess.execute("CREATE INDEX ix_zip5_single_county ON single_county (zip5)")
sess.commit()
logger.info("Created single_county index, matview creation complete.")
def delete_matviews(sess):
logger.info("Deleting matviews")
# zip_county view deletion
sess.execute("DROP MATERIALIZED VIEW IF EXISTS single_county")
sess.execute("DROP MATERIALIZED VIEW IF EXISTS zip_county")
sess.commit()
logger.info("Finished delete of matviews.")
def update_fpds_le(sess):
logger.info("Starting FPDS legal entity derivations, starting legal entity 9-digit zips without dashes")
# FPDS LE 9-digit no dash
sess.execute(
"""UPDATE detached_award_procurement AS dap
SET legal_entity_county_code = zc.county_number,
legal_entity_county_name = CASE WHEN dap.legal_entity_county_name IS NOT NULL
THEN dap.legal_entity_county_name
ELSE UPPER(zc.county_name) END
FROM zip_county AS zc
WHERE zc.combined_zip = dap.legal_entity_zip4
AND dap.legal_entity_county_code IS NULL
AND UPPER(dap.legal_entity_country_code) = 'USA'"""
)
sess.commit()
logger.info("Finished FPDS legal entity 9-digit zips without dashes, starting FPDS legal entity 9-digit zips "
"with dashes")
# FPDS LE 9-digit dash
sess.execute(
"""UPDATE detached_award_procurement AS dap
SET legal_entity_county_code = zc.county_number,
legal_entity_county_name = CASE WHEN dap.legal_entity_county_name IS NOT NULL
THEN dap.legal_entity_county_name
ELSE UPPER(zc.county_name) END
FROM zip_county AS zc
WHERE zc.dashed_zip = dap.legal_entity_zip4
AND dap.legal_entity_county_code IS NULL
AND UPPER(dap.legal_entity_country_code) = 'USA'"""
)
sess.commit()
logger.info("Finished FPDS legal entity 9-digit zips with dashes, starting FPDS legal entity 5-digit zips")
# FPDS LE 5-digit
sess.execute(
"""UPDATE detached_award_procurement AS dap
SET legal_entity_county_code = sc.county_number,
legal_entity_county_name = CASE WHEN dap.legal_entity_county_name IS NOT NULL
THEN dap.legal_entity_county_name
ELSE UPPER(sc.county_name) END
FROM single_county AS sc
WHERE sc.zip5 = LEFT(dap.legal_entity_zip4, 5)
AND dap.legal_entity_county_code IS NULL
AND dap.legal_entity_zip4 ~ '^\d{5}(-?\d{4})?$'
AND UPPER(dap.legal_entity_country_code) = 'USA'"""
)
sess.commit()
logger.info("Finished FPDS legal entity 5-digit zips, FPDS legal entity updates complete.")
def update_fpds_ppop(sess):
logger.info("Starting FPDS PPOP derivations, starting FPDS PPOP 9-digit zips without dashes")
# FPDS PPOP 9-digit no dash
sess.execute(
"""UPDATE detached_award_procurement AS dap
SET place_of_perform_county_co = zc.county_number,
place_of_perform_county_na = CASE WHEN dap.place_of_perform_county_na IS NOT NULL
THEN dap.place_of_perform_county_na
ELSE UPPER(zc.county_name) END
FROM zip_county AS zc
WHERE zc.combined_zip = dap.place_of_performance_zip4a
AND dap.place_of_perform_county_co IS NULL
AND UPPER(dap.place_of_perform_country_c) = 'USA'"""
)
sess.commit()
logger.info("Finished FPDS PPOP 9-digit zips without dashes, starting FPDS PPOP 9-digit zips with dashes")
# FPDS PPOP 9-digit dash
sess.execute(
"""UPDATE detached_award_procurement AS dap
SET place_of_perform_county_co = zc.county_number,
place_of_perform_county_na = CASE WHEN dap.place_of_perform_county_na IS NOT NULL
THEN dap.place_of_perform_county_na
ELSE UPPER(zc.county_name) END
FROM zip_county AS zc
WHERE zc.dashed_zip = dap.place_of_performance_zip4a
AND dap.place_of_perform_county_co IS NULL
AND UPPER(dap.place_of_perform_country_c) = 'USA'"""
)
sess.commit()
logger.info("Finished FPDS PPOP 9-digit zips with dashes, starting FPDS PPOP 5-digit zips")
# FPDS PPOP 5-digit
sess.execute(
"""UPDATE detached_award_procurement AS dap
SET place_of_perform_county_co = sc.county_number,
place_of_perform_county_na = CASE WHEN dap.place_of_perform_county_na IS NOT NULL
THEN dap.place_of_perform_county_na
ELSE UPPER(sc.county_name) END
FROM single_county AS sc
WHERE sc.zip5 = LEFT(dap.place_of_performance_zip4a, 5)
AND dap.place_of_perform_county_co IS NULL
AND dap.place_of_performance_zip4a ~ '^\d{5}(-?\d{4})?$'
AND UPPER(dap.place_of_perform_country_c) = 'USA'"""
)
sess.commit()
logger.info("Finished FPDS PPOP 5-digit zips, FPDS PPOP updates complete")
def update_fabs_le(sess):
logger.info("Starting FABS legal entity derivations, starting FABS legal entity 9-digit zips")
# FABS LE 9-digit
sess.execute(
"""UPDATE published_fabs AS pf
SET legal_entity_county_code = zc.county_number,
legal_entity_county_name = CASE WHEN pf.legal_entity_county_name IS NOT NULL
THEN pf.legal_entity_county_name
ELSE zc.county_name END
FROM zip_county AS zc
WHERE zc.zip5 = pf.legal_entity_zip5
AND zc.zip_last4 = pf.legal_entity_zip_last4
AND pf.legal_entity_county_code IS NULL
AND UPPER(pf.legal_entity_country_code) = 'USA'
AND pf.is_active = True"""
)
sess.commit()
logger.info("Finished FABS legal entity 9-digit zips, starting FABS legal entity 5-digit zips")
# FABS LE 5-digit
sess.execute(
"""UPDATE published_fabs AS pf
SET legal_entity_county_code = sc.county_number,
legal_entity_county_name = CASE WHEN pf.legal_entity_county_name IS NOT NULL
THEN pf.legal_entity_county_name
ELSE sc.county_name END
FROM single_county AS sc
WHERE sc.zip5 = pf.legal_entity_zip5
AND pf.legal_entity_county_code IS NULL
AND UPPER(pf.legal_entity_country_code) = 'USA'
AND pf.is_active = True"""
)
sess.commit()
logger.info("Finished FABS legal 5-digit zips, FABS legal entity updates complete")
def update_fabs_ppop(sess):
logger.info("Starting FABS PPOP derivations, starting FABS PPOP 9-digit zips without dashes")
# FABS PPOP 9-digit no dash
sess.execute(
"""UPDATE published_fabs AS pf
SET place_of_perform_county_co = zc.county_number,
place_of_perform_county_na = CASE WHEN pf.place_of_perform_county_na IS NOT NULL
THEN pf.place_of_perform_county_na
ELSE zc.county_name END
FROM zip_county AS zc
WHERE zc.combined_zip = pf.place_of_performance_zip4a
AND pf.place_of_perform_county_co IS NULL
AND UPPER(pf.place_of_perform_country_c) = 'USA'
AND pf.is_active = True"""
)
sess.commit()
logger.info("Finished FABS PPOP 9-digit zips without dashes, starting FABS PPOP 9-digit zips with dashes")
# FABS PPOP 9-digit dash
sess.execute(
"""UPDATE published_fabs AS pf
SET place_of_perform_county_co = zc.county_number,
place_of_perform_county_na = CASE WHEN pf.place_of_perform_county_na IS NOT NULL
THEN pf.place_of_perform_county_na
ELSE zc.county_name END
FROM zip_county AS zc
WHERE zc.dashed_zip = pf.place_of_performance_zip4a
AND pf.place_of_perform_county_co IS NULL
AND UPPER(pf.place_of_perform_country_c) = 'USA'
AND pf.is_active = True"""
)
sess.commit()
logger.info("Finished FABS PPOP 9-digit zips with dashes, starting FABS PPOP 5-digit zips")
# FABS PPOP 5-digit
sess.execute(
"""UPDATE published_fabs AS pf
SET place_of_perform_county_co = sc.county_number,
place_of_perform_county_na = CASE WHEN pf.place_of_perform_county_na IS NOT NULL
THEN pf.place_of_perform_county_na
ELSE sc.county_name END
FROM single_county AS sc
WHERE sc.zip5 = LEFT(pf.place_of_performance_zip4a, 5)
AND pf.place_of_perform_county_co IS NULL
AND pf.place_of_performance_zip4a ~ '^\d{5}(-?\d{4})?$'
AND UPPER(pf.place_of_perform_country_c) = 'USA'
AND pf.is_active = True"""
)
sess.commit()
logger.info("Finished FABS PPOP 5-digit zips, FABS PPOP updates complete.")
def main():
sess = GlobalDB.db().session
    parser = argparse.ArgumentParser(description='Fix county codes and names on FPDS and FABS records.')
parser.add_argument('-mv', '--matview', help='Create the matviews, make sure they do not already exist',
action='store_true')
parser.add_argument('-dmv', '--delete_matview', help='Delete the matviews', action='store_true')
parser.add_argument('-fpdsle', '--fpds_le', help='Run FPDS Legal Entity updates', action='store_true')
parser.add_argument('-fpdsppop', '--fpds_ppop', help='Run FPDS PPOP updates', action='store_true')
parser.add_argument('-fabsle', '--fabs_le', help='Run FABS Legal Entity updates', action='store_true')
parser.add_argument('-fabsppop', '--fabs_ppop', help='Run FABS PPOP updates', action='store_true')
parser.add_argument('-a', '--all', help='Run all updates without creating or deleting matviews',
action='store_true')
parser.add_argument('-am', '--all_matview', help='Run all updates and create and delete matviews',
action='store_true')
args = parser.parse_args()
logger.info("Starting county code fixes")
if args.all_matview or args.all:
if args.all_matview:
create_matviews(sess)
update_fpds_le(sess)
update_fpds_ppop(sess)
update_fabs_le(sess)
update_fabs_ppop(sess)
if args.all_matview:
delete_matviews(sess)
else:
if args.matview:
create_matviews(sess)
if args.fpds_le:
update_fpds_le(sess)
if args.fpds_ppop:
update_fpds_ppop(sess)
if args.fabs_le:
update_fabs_le(sess)
if args.fabs_ppop:
update_fabs_ppop(sess)
if args.delete_matview:
delete_matviews(sess)
logger.info("Completed county code fixes")
if __name__ == '__main__':
with create_app().app_context():
configure_logging()
main()
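# Illustrative invocations (added; module path assumed from this file's location):
#   python -m dataactcore.scripts.fix_county_data -am               # create matviews, run all updates, drop matviews
#   python -m dataactcore.scripts.fix_county_data -mv -fpdsle -dmv  # only the FPDS legal entity fixes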
|
cc0-1.0
|
46fce4baf8364689effa0695801f03ab
| 41.783537
| 114
| 0.581344
| 3.717351
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/e9c556a9f344_create_certified_comment_table.py
|
1
|
1590
|
"""Create certified_comment table
Revision ID: e9c556a9f344
Revises: 0dc4a1fbb52e
Create Date: 2019-08-29 12:12:13.196702
"""
# revision identifiers, used by Alembic.
revision = 'e9c556a9f344'
down_revision = 'b998d20b46e6'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('certified_comment',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('certified_comment_id', sa.Integer(), nullable=False),
sa.Column('submission_id', sa.Integer(), nullable=False),
sa.Column('file_type_id', sa.Integer(), nullable=False),
sa.Column('comment', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['file_type_id'], ['file_type.file_type_id'], name='fk_file_type'),
sa.ForeignKeyConstraint(['submission_id'], ['submission.submission_id'], name='fk_submission', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('certified_comment_id'),
sa.UniqueConstraint('submission_id', 'file_type_id', name='uniq_cert_comment_submission_file_type')
)
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('certified_comment')
# ### end Alembic commands ###
|
cc0-1.0
|
2fcd10522a1cd66171d4a475dbda3754
| 30.176471
| 123
| 0.669182
| 3.479212
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/4d64c79360af_add_upper_uei_indexes_to_tables.py
|
1
|
1715
|
"""Add upper UEI indexes to tables
Revision ID: 4d64c79360af
Revises: 9e295cff8733
Create Date: 2021-08-05 12:27:08.745832
"""
# revision identifiers, used by Alembic.
revision = '4d64c79360af'
down_revision = '9e295cff8733'
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy import text
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_pafa_uei_upper', 'published_award_financial_assistance', [text('UPPER(uei)')], unique=False)
op.create_index('ix_dafa_uei_upper', 'detached_award_financial_assistance', [text('UPPER(uei)')], unique=False)
op.create_index('ix_dap_awardee_or_recipient_uei_upper', 'detached_award_procurement', [text('UPPER(awardee_or_recipient_uei)')], unique=False)
op.create_index('ix_duns_uei_upper', 'duns', [text('UPPER(uei)')], unique=False)
op.create_index('ix_historic_duns_uei_upper', 'historic_duns', [text('UPPER(uei)')], unique=False)
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_historic_duns_uei_upper', table_name='historic_duns')
op.drop_index('ix_duns_uei_upper', table_name='duns')
op.drop_index('ix_dap_awardee_or_recipient_uei_upper', table_name='detached_award_procurement')
op.drop_index('ix_dafa_uei_upper', table_name='detached_award_financial_assistance')
op.drop_index('ix_pafa_uei_upper', table_name='published_award_financial_assistance')
# ### end Alembic commands ###
|
cc0-1.0
|
bfb9b316af61f0982881fc3b63f486eb
| 34.729167
| 147
| 0.699125
| 2.825371
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/ea8fbaa044d7_remove_first_quarter_flag.py
|
2
|
1601
|
"""remove_first_quarter_flag
Revision ID: ea8fbaa044d7
Revises: 0c857b50962a
Create Date: 2016-10-14 14:04:16.207464
"""
# revision identifiers, used by Alembic.
revision = 'ea8fbaa044d7'
down_revision = '0c857b50962a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('appropriation', 'is_first_quarter')
op.drop_column('award_financial', 'is_first_quarter')
op.drop_column('award_financial_assistance', 'is_first_quarter')
op.drop_column('object_class_program_activity', 'is_first_quarter')
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('object_class_program_activity', sa.Column('is_first_quarter', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
op.add_column('award_financial_assistance', sa.Column('is_first_quarter', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
op.add_column('award_financial', sa.Column('is_first_quarter', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
op.add_column('appropriation', sa.Column('is_first_quarter', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
### end Alembic commands ###
|
cc0-1.0
|
544ded8d6d2d821fe7ac3c07fbaa4f4c
| 33.804348
| 165
| 0.71143
| 3.321577
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/config.py
|
1
|
5488
|
import logging
import os.path
from os.path import expanduser, normpath, dirname, abspath
import yaml
import re
CONFIG_BROKER = {}
CONFIG_SERVICES = {}
CONFIG_DB = {}
CONFIG_LOGGING = {}
CONFIG_CATEGORIES = {"broker": CONFIG_BROKER, "services": CONFIG_SERVICES, "db": CONFIG_DB,
"logging": CONFIG_LOGGING}
# set the location of the DATA Act broker config files
CONFIG_PATH = os.path.join(dirname(abspath(__file__)), 'config.yml')
if "env" in os.environ:
env = os.environ["env"]
else:
env = "local"
ENV_PATH = os.path.join(dirname(abspath(__file__)), '{}_config.yml'.format(env))
SECRET_PATH = os.path.join(dirname(abspath(__file__)), '{}_secrets.yml'.format(env))
path_list = [CONFIG_PATH, ENV_PATH, SECRET_PATH]
# set the location of the Alembic config file
ALEMBIC_PATH = os.path.join(dirname(abspath(__file__)), 'alembic.ini')
MIGRATION_PATH = os.path.join(dirname(abspath(__file__)), 'migrations')
for config_path in path_list:
try:
with open(config_path) as c:
# Default to empty dictionary if file is empty
CONFIG_ALL = yaml.load(c, Loader=yaml.FullLoader) or {}
except IOError:
raise IOError('Error reading a config file. Please make sure this file exists'
' before starting the DATA Act broker: {}'.format(config_path))
for category_name in CONFIG_CATEGORIES:
CONFIG_CATEGORIES[category_name].update(CONFIG_ALL.get(category_name, {}))
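# Descriptive note (added): the files in path_list are read in order and merged per top-level
# category via dict.update(), so keys in the env- and secrets-specific files override config.yml.
# A minimal local_config.yml sketch (keys taken from the lookups below; values are assumptions):
#
#   broker:
#     use_aws: false
#     d_file_storage_path: /tmp/d_files/
#     broker_files: /tmp/broker_files/
#   services:
#     error_report_path: /tmp/error_reports/
#     broker_api_host: 127.0.0.1
#     validator_host: 127.0.0.1
#     broker_api_port: 9999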
# Get path to installation
CONFIG_BROKER['path'] = dirname(dirname(abspath(__file__)))
# for backward-compatibility, differentiate between local runs and AWS
if CONFIG_BROKER['use_aws'] is True or CONFIG_BROKER['use_aws'] == "true":
CONFIG_BROKER['local'] = False
# AWS flag is on, so make sure all needed AWS info is present
required_aws_keys = ['aws_bucket', 'aws_region']
for k in required_aws_keys:
try:
CONFIG_BROKER[k]
except KeyError:
raise KeyError('Config error: use_aws is True, but the {} key is missing from the config.yml file'.
format(k))
if not CONFIG_BROKER[k]:
raise ValueError('Config error: use_aws is True but {} value is missing'.format(k))
else:
CONFIG_BROKER['local'] = True
CONFIG_BROKER['aws_bucket'] = None
CONFIG_BROKER['aws_region'] = None
# if no error report path was specified,
# default to `data_act_broker` in user's home dir
error_report_path = CONFIG_SERVICES['error_report_path']
if not error_report_path:
error_report_path = os.path.join(expanduser('~'), 'data_act_broker')
error_report_path = normpath(error_report_path)
CONFIG_SERVICES['error_report_path'] = error_report_path
storage_path = CONFIG_BROKER['d_file_storage_path']
if storage_path[-1] != os.path.sep:
CONFIG_BROKER['d_file_storage_path'] = "".join([storage_path, os.path.sep])
error_storage_path = CONFIG_SERVICES['error_report_path']
if error_storage_path[-1] != os.path.sep:
CONFIG_SERVICES['error_report_path'] = "".join([error_storage_path, os.path.sep])
# if no broker file path specified,
# default to `data_act_broker` in user's home dir
broker_files = CONFIG_BROKER['broker_files']
if not broker_files:
broker_files = os.path.join(expanduser('~'), 'data_act_broker')
elif len(os.path.splitext(broker_files)[1]):
    # if config's broker_files is set to an actual filename
# just use the directory
broker_files = os.path.split(broker_files)[0]
broker_files = normpath(broker_files)
if broker_files[-1] != os.path.sep:
broker_files += os.path.sep
CONFIG_BROKER['broker_files'] = broker_files
# normalize logging path, if given
log_path = CONFIG_LOGGING['log_files']
if log_path:
CONFIG_LOGGING['log_files'] = normpath(log_path)
# we don't want http:// or ports in the host variables
CONFIG_SERVICES['broker_api_host'] = re.sub(
'http://|:(.*)', '', CONFIG_SERVICES['broker_api_host'])
CONFIG_SERVICES['validator_host'] = re.sub(
'http://|:(.*)', '', CONFIG_SERVICES['validator_host'])
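# e.g. (assumed input) 'http://broker.example.com:8080' -> 'broker.example.com'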
# if the broker API host in the config is set to 0.0.0.0 (and we're not in the
# local env), override to 127.0.0.1 for cross-platform compatibility
if env != "local" and CONFIG_SERVICES['broker_api_host'] == '0.0.0.0':
CONFIG_SERVICES['broker_api_host'] = '127.0.0.1'
if CONFIG_SERVICES["broker_api_port"] == 443:
# Use https
CONFIG_SERVICES["protocol"] = "https"
else:
CONFIG_SERVICES["protocol"] = "http"
# Log some values from config
log_message = ""
if "values_to_log" in CONFIG_LOGGING:
# If no values specified, don't do logging
for category_yaml_name in CONFIG_LOGGING["values_to_log"]:
category = CONFIG_CATEGORIES[category_yaml_name]
category_message = "### {}".format(category_yaml_name)
for key in CONFIG_LOGGING["values_to_log"][category_yaml_name]:
value = category.get(key, "Value not provided in config")
category_message = "{}, {}: {}".format(category_message, key, value)
log_message = " ".join([log_message, category_message])
# Log the selected config values
if log_message:
# Logging is not configured yet; create a console logger to print this
# message
_logger = logging.getLogger('config-printer')
_logger.setLevel(logging.INFO)
_logger.addHandler(logging.FileHandler(os.path.join(
CONFIG_LOGGING['log_files'], 'info.log')))
_logger.info(log_message)
# TODO: error-handling for db config?
# TODO: type checking and fixing for int stuff like ports?
|
cc0-1.0
|
7efadc44d15e2d3c6aa8392236ed2971
| 39.651852
| 111
| 0.668367
| 3.440752
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_fabs42_2.py
|
1
|
2936
|
from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs42_2'
def test_column_headers(database):
expected_subset = {'row_number', 'place_of_performance_forei', 'place_of_perform_country_c', 'record_type',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test PrimaryPlaceOfPerformanceForeignLocationDescription must be blank for domestic recipients
(i.e., when PrimaryPlaceOfPerformanceCountryCode = USA). This test shouldn't care about content when
country_code is not USA (that is for another validation) or for aggregate and PII-redacted non-aggregate
records (RecordType=1 or 3).
"""
fabs_1 = FABSFactory(place_of_performance_forei='description', place_of_perform_country_c='UK', record_type=2,
correction_delete_indicatr='')
fabs_2 = FABSFactory(place_of_performance_forei=None, place_of_perform_country_c='UK', record_type=3,
correction_delete_indicatr=None)
fabs_3 = FABSFactory(place_of_performance_forei=None, place_of_perform_country_c='USA', record_type=2,
correction_delete_indicatr='c')
fabs_4 = FABSFactory(place_of_performance_forei='', place_of_perform_country_c='UsA', record_type=1,
correction_delete_indicatr='C')
fabs_5 = FABSFactory(place_of_performance_forei=None, place_of_perform_country_c='UsA', record_type=3,
correction_delete_indicatr='')
# Ignore correction delete indicator of D
fabs_6 = FABSFactory(place_of_performance_forei='Test', place_of_perform_country_c='USA', record_type=2,
correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4, fabs_5, fabs_6])
assert errors == 0
def test_failure(database):
""" Test failure PrimaryPlaceOfPerformanceForeignLocationDescription must be blank for domestic recipients
(i.e., when PrimaryPlaceOfPerformanceCountryCode = USA) or for aggregate and PII-redacted non-aggregate records
(RecordType=1 or 3).
"""
fabs_1 = FABSFactory(place_of_performance_forei='Test', place_of_perform_country_c='USA', record_type=2,
correction_delete_indicatr='')
fabs_2 = FABSFactory(place_of_performance_forei='Content', place_of_perform_country_c='usa', record_type=1,
correction_delete_indicatr=None)
fabs_3 = FABSFactory(place_of_performance_forei='Content', place_of_perform_country_c='CAN', record_type=3,
correction_delete_indicatr='c')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3])
assert errors == 3
|
cc0-1.0
|
d8fa0aacdf4eaead04e031c4f183c7d5
| 54.396226
| 119
| 0.675068
| 3.541616
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactbroker/handlers/account_handler.py
|
1
|
21072
|
import logging
from operator import attrgetter
import requests
import xmltodict
from flask import g
from dataactbroker.handlers.aws.sesEmail import SesEmail
from dataactbroker.handlers.aws.session import LoginSession
from dataactcore.utils.jsonResponse import JsonResponse
from dataactcore.utils.requestDictionary import RequestDictionary
from dataactcore.interfaces.db import GlobalDB
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy import func, or_
from dataactcore.models.userModel import User, UserAffiliation
from dataactcore.models.domainModels import CGAC, FREC
from dataactcore.models.jobModels import Submission
from dataactcore.utils.statusCode import StatusCode
from dataactcore.interfaces.function_bag import get_email_template, check_correct_password
from dataactcore.config import CONFIG_BROKER
from dataactcore.models.lookups import PERMISSION_SHORT_DICT, DABS_PERMISSION_ID_LIST, FABS_PERMISSION_ID_LIST
logger = logging.getLogger(__name__)
class AccountHandler:
""" This class contains the login / logout functions
Attributes:
is_local: A boolean indicating if the application is being run locally or not
request: A Flask object containing the data from the request
bcrypt: A Bcrypt object associated with the app
Constants:
FRONT_END: A string indicating the URL of the front end of the app
"""
# Handles login process, compares username and password provided
FRONT_END = ""
    # Instance fields include request, bcrypt, and is_local
def __init__(self, request, bcrypt=None, is_local=False):
""" Creates the Login Handler
Args:
request: Flask request object
bcrypt: Bcrypt object associated with app
"""
self.is_local = is_local
self.request = request
self.bcrypt = bcrypt
def login(self, session):
""" Logs a user in if their password matches using local data
Args:
session: the Session object from flask
Returns:
A JsonResponse containing the user information or details on which error occurred, such as whether a
type was wrong, something wasn't implemented, invalid keys were provided, login was denied, or a
different, unexpected error occurred.
"""
try:
sess = GlobalDB.db().session
safe_dictionary = RequestDictionary(self.request)
username = safe_dictionary.get_value('username')
password = safe_dictionary.get_value('password')
try:
user = sess.query(User).filter(func.lower(User.email) == func.lower(username)).one()
except Exception:
raise ValueError("Invalid username and/or password")
try:
if check_correct_password(user, password, self.bcrypt):
# We have a valid login
return self.create_session_and_response(session, user)
else:
raise ValueError("Invalid username and/or password")
except ValueError as ve:
LoginSession.logout(session)
raise ve
except Exception as e:
LoginSession.logout(session)
raise e
# Catch any specifically raised errors or any other errors that may have happened and return them cleanly
except (TypeError, KeyError, NotImplementedError) as e:
# Return a 400 with appropriate message
return JsonResponse.error(e, StatusCode.CLIENT_ERROR)
except ValueError as e:
# Return a 401 for login denied
return JsonResponse.error(e, StatusCode.LOGIN_REQUIRED)
except Exception as e:
# Return 500
return JsonResponse.error(e, StatusCode.INTERNAL_ERROR)
def max_login(self, session):
""" Logs a user in if their password matches using MAX
Args:
session: Session object from flask
Returns:
A JsonResponse containing the user information or details on which error occurred, such as whether a
type was wrong, something wasn't implemented, invalid keys were provided, login was denied, or a
different, unexpected error occurred.
"""
try:
safe_dictionary = RequestDictionary(self.request)
ticket = safe_dictionary.get_value("ticket")
service = safe_dictionary.get_value('service')
# Call MAX's serviceValidate endpoint and retrieve the response
max_dict = get_max_dict(ticket, service)
if 'cas:authenticationSuccess' not in max_dict['cas:serviceResponse']:
raise ValueError("The Max CAS endpoint was unable to locate your session "
"using the ticket/service combination you provided.")
cas_attrs = max_dict['cas:serviceResponse']['cas:authenticationSuccess']['cas:attributes']
# Grab MAX ID to see if a service account is being logged in
max_id_components = cas_attrs['maxAttribute:MAX-ID'].split('_')
service_account_flag = (len(max_id_components) > 1 and max_id_components[0].lower() == 's')
# Grab the email and list of groups from MAX's response
email = cas_attrs['maxAttribute:Email-Address']
try:
sess = GlobalDB.db().session
user = sess.query(User).filter(func.lower(User.email) == func.lower(email)).one_or_none()
# If the user does not exist, create them since they are allowed to access the site because they got
# past the above group membership checks
if user is None:
user = User()
user.email = email
set_user_name(user, cas_attrs)
set_max_perms(user, cas_attrs['maxAttribute:GroupList'], service_account_flag)
sess.add(user)
sess.commit()
except MultipleResultsFound:
raise ValueError("An error occurred during login.")
return self.create_session_and_response(session, user)
# Catch any specifically raised errors or any other errors that may have happened and return them cleanly.
# We add the error parameter here because this endpoint needs to provide better feedback, and to avoid changing
# the default behavior of the JsonResponse class globally.
except (TypeError, KeyError, NotImplementedError) as e:
# Return a 400 with appropriate message
return JsonResponse.error(e, StatusCode.CLIENT_ERROR, error=str(e))
except ValueError as e:
# Return a 401 for login denied
return JsonResponse.error(e, StatusCode.LOGIN_REQUIRED, error=str(e))
except Exception as e:
# Return 500
return JsonResponse.error(e, StatusCode.INTERNAL_ERROR, error=str(e))
@staticmethod
def create_session_and_response(session, user):
""" Create a session.
Args:
session: Session object from flask
user: Users object
Returns:
JsonResponse containing the JSON for the user
"""
LoginSession.login(session, user.user_id)
data = json_for_user(user, session['sid'])
data['message'] = 'Login successful'
return JsonResponse.create(StatusCode.OK, data)
@staticmethod
def set_skip_guide(skip_guide):
""" Set current user's skip guide parameter
Args:
skip_guide: boolean indicating whether the skip guide should be visible or not for this user
Returns:
JsonResponse object containing results of setting the skip guide or details of the error that occurred.
Possible errors include the request not containing a skip_guide parameter or it not being a boolean
value
"""
sess = GlobalDB.db().session
g.user.skip_guide = skip_guide
sess.commit()
return JsonResponse.create(StatusCode.OK, {'message': 'skip_guide set successfully', 'skip_guide': skip_guide})
@staticmethod
def email_users(submission, system_email, template_type, user_ids):
""" Send email notification to list of users
Args:
submission: the submission to send the email about
system_email: the address of the system to send the email from
template_type: the template type of the email to send
user_ids: A list of user IDs denoting who to send the email to
Returns:
                A JsonResponse containing a message that the email was sent successfully or the details of the missing
or incorrect parameters
"""
sess = GlobalDB.db().session
if submission.cgac_code:
agency = sess.query(CGAC).filter_by(cgac_code=submission.cgac_code).first()
else:
agency = sess.query(FREC).filter_by(frec_code=submission.frec_code).first()
if not agency:
return JsonResponse.error(ValueError("The requested submission is not aligned to a valid CGAC or FREC "
"agency"), StatusCode.CLIENT_ERROR)
# Check if email template type is valid
get_email_template(template_type)
link = "".join([AccountHandler.FRONT_END, '#/submission/', str(submission.submission_id)])
email_template = {'[REV_USER_NAME]': g.user.name, '[REV_AGENCY]': agency.agency_name, '[REV_URL]': link}
users = []
for user_id in user_ids:
# Check if user id is valid, if so add User object to array
users.append(sess.query(User).filter(User.user_id == user_id).one())
for user in users:
new_email = SesEmail(user.email, system_email, template_type=template_type, parameters=email_template)
new_email.send()
return JsonResponse.create(StatusCode.OK, {"message": "Emails successfully sent"})
def perms_to_affiliations(perms, user_id, service_account_flag=False):
""" Convert a list of perms from MAX to a list of UserAffiliations. Filter out and log any malformed perms
Args:
perms: list of permissions (as strings) for the user
user_id: the ID of the user
service_account_flag: flag to indicate a service account
Yields:
UserAffiliations based on the permissions provided
"""
available_cgacs = {cgac.cgac_code: cgac for cgac in GlobalDB.db().session.query(CGAC)}
available_frecs = {frec.frec_code: frec for frec in GlobalDB.db().session.query(FREC)}
log_data = {
'message_type': 'BrokerWarning',
'user_id': user_id
}
for perm in perms:
log_data['message'] = 'User with ID {} has malformed permission: {}'.format(user_id, perm)
components = perm.split('-PERM_')
if len(components) != 2:
logger.warning(log_data)
continue
codes, perm_level = components
split_codes = codes.split('-FREC_')
frec_code, cgac_code = None, None
if len(split_codes) == 2:
# permissions for FR entity code and readonly CGAC
frec_code, cgac_code = split_codes[1], split_codes[0]
if frec_code not in available_frecs or cgac_code not in available_cgacs:
logger.warning(log_data)
continue
else:
# permissions for CGAC
cgac_code = codes
if cgac_code not in available_cgacs:
logger.warning(log_data)
continue
perm_level = perm_level.lower()
if service_account_flag:
# Replace MAX Service Account permissions with Broker "write" and "editfabs" permissions
perm_level = 'we'
elif perm_level not in 'rwsef':
logger.warning(log_data)
continue
for permission in perm_level:
if frec_code:
yield UserAffiliation(
cgac=available_cgacs[cgac_code],
frec=None,
permission_type_id=PERMISSION_SHORT_DICT['r']
)
yield UserAffiliation(
cgac=None,
frec=available_frecs[frec_code],
permission_type_id=PERMISSION_SHORT_DICT[permission]
)
else:
yield UserAffiliation(
cgac=available_cgacs[cgac_code] if cgac_code else None,
frec=None,
permission_type_id=PERMISSION_SHORT_DICT[permission]
)
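# Illustrative sketch (not part of the original module): how a raw MAX permission string
# decomposes into agency codes and a permission level, mirroring the parsing in
# perms_to_affiliations above. The codes used here are hypothetical examples.
def _example_perm_split(perm='ABC-FREC_1234-PERM_S'):
    codes, perm_level = perm.split('-PERM_')    # ('ABC-FREC_1234', 'S')
    split_codes = codes.split('-FREC_')         # ['ABC', '1234'] -> read-only CGAC plus FREC permission
    return split_codes, perm_level.lower()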
def best_affiliation(affiliations):
""" If a user has multiple permissions for a single agency, select the best
Args:
affiliations: list of UserAffiliations a user has
Returns:
List of all affiliations the user has (with duplicates, highest of each type/agency provided)
"""
dabs_dict, fabs_dict = {}, {}
# Sort all affiliations from lowest to highest permission
sorted_affiliations = sorted(list(affiliations), key=attrgetter('permission_type_id'))
for affiliation in sorted_affiliations:
# Overwrite low permissions with high permissions; keep DABS and FABS separate so FABS doesn't overwrite DABS
if affiliation.permission_type_id in DABS_PERMISSION_ID_LIST:
dabs_dict[affiliation.cgac, affiliation.frec] = affiliation
elif affiliation.permission_type_id in FABS_PERMISSION_ID_LIST:
fabs_dict[affiliation.cgac, affiliation.frec] = affiliation
all_affils = list(dabs_dict.values()) + list(fabs_dict.values())
return all_affils
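# Illustrative sketch (not part of the original module): best_affiliation above relies on
# sorting by permission_type_id and letting later (higher) entries overwrite earlier ones
# keyed by agency. The same idea with plain tuples and hypothetical values:
def _example_highest_permission_wins(pairs=(('ABC', 1), ('ABC', 3), ('DEF', 2))):
    best = {}
    for agency, level in sorted(pairs, key=lambda pair: pair[1]):
        best[agency] = level    # higher levels overwrite lower ones per agency
    return best                 # {'ABC': 3, 'DEF': 2}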
def set_user_name(user, cas_attrs):
""" Update the name for the user based on the MAX attributes.
Args:
user: the User object
cas_attrs: a dictionary of the max attributes (includes first, middle, last names) for a logged in user
"""
first_name = cas_attrs['maxAttribute:First-Name']
middle_name = cas_attrs['maxAttribute:Middle-Name']
last_name = cas_attrs['maxAttribute:Last-Name']
# Check for None first so the condition can short-circuit without
# having to worry about calling strip() on a None object
if middle_name is None or middle_name.strip() == '':
user.name = first_name + " " + last_name
else:
user.name = first_name + " " + middle_name[0] + ". " + last_name
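# Illustrative sketch (not part of the original module): the display name produced above,
# with and without a middle name, using hypothetical attribute values.
def _example_display_name(first='Jane', middle='Quinn', last='Doe'):
    if middle is None or middle.strip() == '':
        return first + " " + last                    # 'Jane Doe'
    return first + " " + middle[0] + ". " + last     # 'Jane Q. Doe'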
def set_max_perms(user, max_group_list, service_account_flag=False):
""" Convert the user group lists present on MAX into a list of UserAffiliations and/or website_admin status.
Permissions are encoded as a comma-separated list of:
{parent-group}-CGAC_{cgac-code}-PERM_{one-of-R-W-S-F}
{parent-group}-CGAC_{cgac-code}-FREC_{frec_code}-PERM_{one-of-R-W-S-F}
or
{parent-group}-CGAC_SYS to indicate website_admin
Args:
user: the User object
max_group_list: list of all MAX groups the user has
service_account_flag: flag to indicate a service account
"""
prefix = CONFIG_BROKER['parent_group'] + '-CGAC_'
# Each group name that we care about begins with the prefix, but once we have that list, we don't need the
# prefix anymore, so trim it off.
if max_group_list is not None:
perms = [group_name[len(prefix):]
for group_name in max_group_list.split(',')
if group_name.startswith(prefix)]
elif service_account_flag:
raise ValueError("There are no DATA Act Broker permissions assigned to this Service Account. You may request "
"permissions at https://community.max.gov/x/fJwuRQ")
else:
perms = []
if 'SYS' in perms:
user.affiliations = []
user.website_admin = True
else:
affiliations = best_affiliation(perms_to_affiliations(perms, user.user_id, service_account_flag))
user.affiliations = affiliations
user.website_admin = False
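# Illustrative sketch (not part of the original module): trimming the configured parent-group
# prefix off the MAX group list before it is handed to perms_to_affiliations, as done in
# set_max_perms above. The prefix and group names here are hypothetical.
def _example_trim_group_prefix(max_group_list='PARENT-CGAC_ABC-PERM_R,PARENT-CGAC_SYS,OTHER-GROUP',
                               prefix='PARENT-CGAC_'):
    return [group_name[len(prefix):]
            for group_name in max_group_list.split(',')
            if group_name.startswith(prefix)]    # ['ABC-PERM_R', 'SYS']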
def json_for_user(user, session_id):
""" Convert the provided user to a dictionary (for JSON)
Args:
            user: the User object
            session_id: the ID of the user's current session
Returns:
An object containing user details
"""
return {
"user_id": user.user_id,
"name": user.name,
"title": user.title,
"skip_guide": user.skip_guide,
"website_admin": user.website_admin,
"affiliations": [{"agency_name": affil.cgac.agency_name, "permission": affil.permission_type_name}
if affil.cgac else
{"agency_name": affil.frec.agency_name, "permission": affil.permission_type_name}
for affil in user.affiliations],
"session_id": session_id
}
def get_max_dict(ticket, service):
""" Get the result from MAX's serviceValidate functionality
Args:
ticket: the ticket to send to MAX
service: the service to send to MAX
Returns:
A dictionary of the response from MAX
"""
url = CONFIG_BROKER['cas_service_url'].format(ticket, service)
max_xml = requests.get(url).content
return xmltodict.parse(max_xml)
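# Illustrative sketch (not part of the original module): the nested keys read in max_login
# come straight from xmltodict's parse of a CAS serviceValidate response. A minimal,
# hypothetical success payload parses like this:
def _example_cas_parse():
    sample_xml = ('<cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">'
                  '<cas:authenticationSuccess><cas:attributes>'
                  '<maxAttribute:Email-Address>user@example.com</maxAttribute:Email-Address>'
                  '</cas:attributes></cas:authenticationSuccess></cas:serviceResponse>')
    parsed = xmltodict.parse(sample_xml)
    return parsed['cas:serviceResponse']['cas:authenticationSuccess']['cas:attributes']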
def logout(session):
""" This function removes the session from the session table if currently logged in, and then returns a success
message
Args:
session: the Session object
Returns:
a JsonResponse that the logout was successful
"""
# Call session handler
LoginSession.logout(session)
return JsonResponse.create(StatusCode.OK, {"message": "Logout successful"})
def list_user_emails():
""" List user names and emails
Returns:
A JsonResponse that contains a list of user information (ID, name, and email)
"""
sess = GlobalDB.db().session
users = sess.query(User)
if not g.user.website_admin:
relevant_cgacs = [aff.cgac_id for aff in g.user.affiliations]
subquery = sess.query(UserAffiliation.user_id).filter(UserAffiliation.cgac_id.in_(relevant_cgacs))
users = users.filter(User.user_id.in_(subquery))
user_info = [{"id": user.user_id, "name": user.name, "email": user.email} for user in users]
return JsonResponse.create(StatusCode.OK, {"users": user_info})
def list_submission_users(is_fabs):
""" List user IDs and names that have submissions that the requesting user can see.
Arguments:
is_fabs: boolean indicating whether it is a DABS or FABS submission (True if FABS)
Returns:
A JsonResponse containing a list of users that have submissions that the requesting user can see
"""
sess = GlobalDB.db().session
# subquery to create the EXISTS portion of the query
exists_query = sess.query(Submission).filter(Submission.user_id == User.user_id,
Submission.is_fabs.is_(is_fabs))
# if user is not an admin, we have to adjust the exists query to limit submissions
if not g.user.website_admin:
# split affiliations into frec and cgac
cgac_affiliations = [aff for aff in g.user.affiliations if aff.cgac]
frec_affiliations = [aff for aff in g.user.affiliations if aff.frec]
# Don't list FABS permissions users if the user only has DABS permissions
if not is_fabs:
cgac_affiliations = [aff for aff in cgac_affiliations if aff.permission_type_id in DABS_PERMISSION_ID_LIST]
frec_affiliations = [aff for aff in frec_affiliations if aff.permission_type_id in DABS_PERMISSION_ID_LIST]
# Make a list of cgac and frec codes
cgac_list = [aff.cgac.cgac_code for aff in cgac_affiliations]
frec_list = [aff.frec.frec_code for aff in frec_affiliations]
# Add filters where applicable
affiliation_filters = [Submission.user_id == g.user.user_id]
if cgac_list:
affiliation_filters.append(Submission.cgac_code.in_(cgac_list))
if frec_list:
affiliation_filters.append(Submission.frec_code.in_(frec_list))
exists_query = exists_query.filter(or_(*affiliation_filters))
    # Wrap the query in an EXISTS; this couldn't be done earlier because the filters above had to be applied first
exists_query = exists_query.exists()
# Get all the relevant users
user_results = sess.query(User.user_id, User.name, User.email).filter(exists_query).order_by(User.name).all()
# Create an array containing relevant users in a readable format
user_list = []
for user in user_results:
user_list.append({'user_id': user[0], 'name': user[1], 'email': user[2]})
return JsonResponse.create(StatusCode.OK, {"users": user_list})
|
cc0-1.0
|
395ee65e4ad25d5cb5a2974b8248d03f
| 39.996109
| 119
| 0.627563
| 4.266451
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/scripts/remove_fabs_duplicate_submissions.py
|
1
|
4636
|
import logging
from dataactcore.interfaces.db import GlobalDB
from dataactcore.broker_logging import configure_logging
from dataactcore.models.jobModels import PublishedFilesHistory, CertifyHistory, PublishHistory, Submission
from dataactcore.models.userModel import User # noqa
from dataactcore.models.lookups import PUBLISH_STATUS_DICT
from dataactvalidator.health_check import create_app
logger = logging.getLogger(__name__)
if __name__ == '__main__':
""" Cleans up duplicated FABS published records and unpublishes the submissions they're associated with if all
records from a specific submission are deleted.
"""
with create_app().app_context():
configure_logging()
sess = GlobalDB.db().session
logger.info("Beginning script to clean up duplicated FABS records. Creating temporary table.")
# Create a temporary table
sess.execute("""CREATE TEMP TABLE duplicated_fabs AS
SELECT UPPER(afa_generated_unique) as afa_generated_unique, MAX(submission_id) AS max_id
FROM published_fabs
WHERE is_active IS TRUE
GROUP BY UPPER(afa_generated_unique)
HAVING COUNT(1) > 1""")
logger.info("Table created, determining which submissions have been affected.")
# Figure out exactly which submissions have been affected in any way
executed = sess.execute(""" SELECT DISTINCT submission_id
FROM published_fabs AS pf
WHERE is_active IS TRUE
AND EXISTS (SELECT 1
FROM duplicated_fabs AS df
WHERE df.afa_generated_unique = UPPER(pf.afa_generated_unique))""")
affected_submissions = []
for row in executed:
affected_submissions.append(row['submission_id'])
# If no rows are affected, just exit, no need to hit the DB anymore
if len(affected_submissions) == 0:
logger.info("There are no duplicated submissions, ending script.")
exit(0)
logger.info("Deleting duplicate records.")
# Delete duplicates from the published FABS table, keeping the instance with the highest submission_id
executed = sess.execute(""" DELETE FROM published_fabs AS pf
WHERE is_active IS TRUE
AND EXISTS (SELECT 1
FROM duplicated_fabs AS df
WHERE df.afa_generated_unique = UPPER(pf.afa_generated_unique)
AND df.max_id != pf.submission_id)""")
logger.info("Deleted {} duplicate rows from published_fabs. Determining if any "
"submissions have been completely invalidated by the deletes.".format(executed.rowcount))
# Make a list of submissions that have had all published records deleted
cleared_submissions = []
for sub in affected_submissions:
executed = sess.execute(""" SELECT COUNT(*) as result_count
FROM published_fabs
WHERE submission_id = {}""".format(sub))
if executed.fetchone()['result_count'] == 0:
cleared_submissions.append(sub)
# If no submission has been cleared out completely, we can just exit
if len(cleared_submissions) == 0:
logger.info("No affected submissions have been completely invalidated by the deletes, ending script.")
exit(0)
logger.info("The following submissions have been completely invalidated by the deletes, unpublishing them: "
+ ", ".join(str(sub) for sub in cleared_submissions))
# Unpublish each submission that has been cleared out, including deleting any record of it in the
# certified/published tables
for sub in cleared_submissions:
sess.query(PublishedFilesHistory).filter_by(submission_id=sub).delete()
sess.query(CertifyHistory).filter_by(submission_id=sub).delete()
sess.query(PublishHistory).filter_by(submission_id=sub).delete()
sess.query(Submission).filter_by(submission_id=sub).\
update({"publish_status_id": PUBLISH_STATUS_DICT["unpublished"]})
sess.commit()
logger.info("Submissions successfully unpublished, script completed.")
|
cc0-1.0
|
2bd3cc2c51c8a9f36995efd2b961be49
| 50.511111
| 116
| 0.600518
| 5.0282
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/de5e3fa1d2d2_adding_display_tas_to_staging_tables_sf133.py
|
1
|
1322
|
""" Adding display_tas to staging tables and SF133 table
Revision ID: de5e3fa1d2d2
Revises: d753553fa79b
Create Date: 2019-10-29 17:43:50.519330
"""
# revision identifiers, used by Alembic.
revision = 'de5e3fa1d2d2'
down_revision = 'd753553fa79b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('appropriation', sa.Column('display_tas', sa.Text()))
op.add_column('award_financial', sa.Column('display_tas', sa.Text()))
op.add_column('object_class_program_activity', sa.Column('display_tas', sa.Text()))
op.add_column('sf_133', sa.Column('display_tas', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('object_class_program_activity', 'display_tas')
op.drop_column('award_financial', 'display_tas')
op.drop_column('appropriation', 'display_tas')
op.drop_column('sf_133', 'display_tas')
# ### end Alembic commands ###
|
cc0-1.0
|
de7d032990204d0c51189987cde31b99
| 27.73913
| 87
| 0.687595
| 3.232274
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/scripts/clean_expired_submissions.py
|
1
|
3262
|
import logging
import argparse
import json
from datetime import datetime
from dateutil.relativedelta import relativedelta
from dataactcore.interfaces.db import GlobalDB
from dataactcore.broker_logging import configure_logging
from dataactbroker.handlers.submission_handler import delete_all_submission_data
from dataactcore.models.lookups import PUBLISH_STATUS_DICT
from dataactcore.models.jobModels import Submission
from dataactcore.models.views import SubmissionUpdatedView
from dataactvalidator.health_check import create_app
logger = logging.getLogger(__name__)
def clean_expired_submissions(fy17q1_subs=False):
""" Cleans the database of expired submissions
Definition of an expired submission:
* unpublished
* DABS test submission or certifiable FY17Q1 submissions
* has not been updated (including any of its jobs) in over 6 months
Args:
fy17q1_subs: whether to specifically remove expired submissions from FY17Q1
"""
sess = GlobalDB.db().session
logger.info("Getting expired submissions")
if fy17q1_subs:
expired_submissions = sess.query(Submission).filter(
Submission.publish_status_id == PUBLISH_STATUS_DICT['unpublished'],
Submission.is_fabs.is_(False),
Submission.reporting_fiscal_year == 2017,
Submission.reporting_fiscal_period == 3
).all()
else:
updated_at_view = SubmissionUpdatedView()
expiration_cutoff = datetime.utcnow() - relativedelta(months=6)
expired_submissions = sess.query(Submission).filter(
Submission.publish_status_id == PUBLISH_STATUS_DICT['unpublished'],
Submission.test_submission.is_(True),
updated_at_view.updated_at < expiration_cutoff
).outerjoin(updated_at_view.table, updated_at_view.submission_id == Submission.submission_id).all()
expired_submission_ids = [exp_sub.submission_id for exp_sub in expired_submissions]
logger.info("Expired submissions (count: {}): {}".format(len(expired_submission_ids), expired_submission_ids))
logger.info("Deleting expired submissions")
for submission in expired_submissions:
delete_all_submission_data(submission)
sess.commit()
logger.info("Database cleaned of expired submissions")
return expired_submission_ids
if __name__ == '__main__':
with create_app().app_context():
configure_logging()
parser = argparse.ArgumentParser(description='Clean expired submissions')
parser.add_argument('-fy17q1', '--fy17q1', help='Specifically remove expired submissions from FY17Q1',
action='store_true')
args = parser.parse_args()
start_time = datetime.utcnow()
metrics = {
'script_name': 'clean_expired_submissions.py',
'start_time': str(start_time),
}
expired_subs = clean_expired_submissions(args.fy17q1)
metrics['subs_removed'] = expired_subs
metrics['subs_removed_count'] = len(expired_subs)
metrics['duration'] = str(datetime.utcnow() - start_time)
with open('clean_expired_subs_metrics.json', 'w+') as metrics_file:
json.dump(metrics, metrics_file)
|
cc0-1.0
|
a7bf6d051d74bc735e03bfaca40ddbbe
| 38.301205
| 114
| 0.683936
| 4.144854
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/cli/upload.py
|
1
|
9685
|
from sys import exit
from unicodedata import name
import pros.common.ui as ui
import pros.conductor as c
from .common import *
from pros.ga.analytics import analytics
@pros_root
def upload_cli():
pass
@upload_cli.command(aliases=['u'])
@click.option('--target', type=click.Choice(['v5', 'cortex']), default=None, required=False,
help='Specify the target microcontroller. Overridden when a PROS project is specified.')
@click.argument('path', type=click.Path(exists=True), default=None, required=False)
@click.argument('port', type=str, default=None, required=False)
@project_option(required=False, allow_none=True)
@click.option('--run-after/--no-run-after', 'run_after', default=None, help='Immediately run the uploaded program.',
cls=PROSDeprecated, replacement='after')
@click.option('--run-screen/--execute', 'run_screen', default=None, help='Display run program screen on the brain after upload.',
cls=PROSDeprecated, replacement='after')
@click.option('-af', '--after', type=click.Choice(['run','screen','none']), default=None, help='Action to perform on the brain after upload.',
cls=PROSOption, group='V5 Options')
@click.option('--quirk', type=int, default=0)
@click.option('--name', 'remote_name', type=str, default=None, required=False, help='Remote program name.',
cls=PROSOption, group='V5 Options')
@click.option('--slot', default=None, type=click.IntRange(min=1, max=8), help='Program slot on the GUI.',
cls=PROSOption, group='V5 Options')
@click.option('--icon', type=click.Choice(['pros','pizza','planet','alien','ufo','robot','clawbot','question','X','power']), default='pros',
help="Change Program's icon on the V5 Brain", cls=PROSOption, group='V5 Options')
@click.option('--program-version', default=None, type=str, help='Specify version metadata for program.',
cls=PROSOption, group='V5 Options', hidden=True)
@click.option('--ini-config', type=click.Path(exists=True), default=None, help='Specify a program configuration file.',
cls=PROSOption, group='V5 Options', hidden=True)
@click.option('--compress-bin/--no-compress-bin', 'compress_bin', cls=PROSOption, group='V5 Options', default=True,
help='Compress the program binary before uploading.')
@click.option('--description', default="Made with PROS", type=str, cls=PROSOption, group='V5 Options',
help='Change the description displayed for the program.')
@click.option('--name', default=None, type=str, cls=PROSOption, group='V5 Options',
help='Change the name of the program.')
@default_options
def upload(path: Optional[str], project: Optional[c.Project], port: str, **kwargs):
"""
Upload a binary to a microcontroller.
[PATH] may be a directory or file. If a directory, finds a PROS project root and uploads the binary for the correct
target automatically. If a file, then the file is uploaded. Note that --target must be specified in this case.
[PORT] may be any valid communication port file, such as COM1 or /dev/ttyACM0. If left blank, then a port is
automatically detected based on the target (or as supplied by the PROS project)
"""
analytics.send("upload")
import pros.serial.devices.vex as vex
from pros.serial.ports import DirectPort
    kwargs['ide_version'] = project.kernel if project is not None else "None"
kwargs['ide'] = 'PROS'
if path is None or os.path.isdir(path):
if project is None:
project_path = c.Project.find_project(path or os.getcwd())
if project_path is None:
raise click.UsageError('Specify a file to upload or set the cwd inside a PROS project')
project = c.Project(project_path)
path = os.path.join(project.location, project.output)
if project.target == 'v5' and not kwargs['remote_name']:
kwargs['remote_name'] = project.name
# apply upload_options as a template
options = dict(**project.upload_options)
if 'slot' in options and kwargs.get('slot', None) is None:
kwargs.pop('slot')
elif kwargs.get('slot', None) is None:
kwargs['slot'] = 1
if 'icon' in options and kwargs.get('icon','pros') == 'pros':
kwargs.pop('icon')
if 'after' in options and kwargs.get('after','screen') is None:
kwargs.pop('after')
options.update(kwargs)
kwargs = options
kwargs['target'] = project.target # enforce target because uploading to the wrong uC is VERY bad
if 'program-version' in kwargs:
kwargs['version'] = kwargs['program-version']
if 'remote_name' not in kwargs:
kwargs['remote_name'] = project.name
name_to_file = {
'pros' : 'USER902x.bmp',
'pizza' : 'USER003x.bmp',
'planet' : 'USER013x.bmp',
'alien' : 'USER027x.bmp',
'ufo' : 'USER029x.bmp',
'clawbot' : 'USER010x.bmp',
'robot' : 'USER011x.bmp',
'question' : 'USER002x.bmp',
'power' : 'USER012x.bmp',
'X' : 'USER001x.bmp'
}
kwargs['icon'] = name_to_file[kwargs['icon']]
if 'target' not in kwargs or kwargs['target'] is None:
logger(__name__).debug(f'Target not specified. Arguments provided: {kwargs}')
        raise click.UsageError('Target not specified. Specify a project (using the file argument) or target manually')
if kwargs['target'] == 'v5':
port = resolve_v5_port(port, 'system')[0]
elif kwargs['target'] == 'cortex':
port = resolve_cortex_port(port)
else:
logger(__name__).debug(f"Invalid target provided: {kwargs['target']}")
logger(__name__).debug('Target should be one of ("v5" or "cortex").')
if not port:
raise dont_send(click.UsageError('No port provided or located. Make sure to specify --target if needed.'))
if kwargs['target'] == 'v5':
kwargs['remote_name'] = kwargs['name'] if kwargs.get("name",None) else kwargs['remote_name']
if kwargs['remote_name'] is None:
kwargs['remote_name'] = os.path.splitext(os.path.basename(path))[0]
kwargs['remote_name'] = kwargs['remote_name'].replace('@', '_')
kwargs['slot'] -= 1
action_to_kwarg = {
'run' : vex.V5Device.FTCompleteOptions.RUN_IMMEDIATELY,
'screen' : vex.V5Device.FTCompleteOptions.RUN_SCREEN,
'none' : vex.V5Device.FTCompleteOptions.DONT_RUN
}
after_upload_default = 'screen'
        # Determine which FTCompleteOption to assign to run_after
        if kwargs['after'] is None:
            kwargs['after'] = after_upload_default
            if kwargs['run_after']:
                kwargs['after'] = 'run'
            elif kwargs['run_screen'] is False and not kwargs['run_after']:
                kwargs['after'] = 'none'
kwargs['run_after'] = action_to_kwarg[kwargs['after']]
kwargs.pop('run_screen')
kwargs.pop('after')
elif kwargs['target'] == 'cortex':
pass
logger(__name__).debug('Arguments: {}'.format(str(kwargs)))
# Do the actual uploading!
try:
ser = DirectPort(port)
device = None
if kwargs['target'] == 'v5':
device = vex.V5Device(ser)
elif kwargs['target'] == 'cortex':
device = vex.CortexDevice(ser).get_connected_device()
if project is not None:
device.upload_project(project, **kwargs)
else:
with click.open_file(path, mode='rb') as pf:
device.write_program(pf, **kwargs)
except Exception as e:
logger(__name__).exception(e, exc_info=True)
exit(1)
@upload_cli.command('lsusb', aliases=['ls-usb', 'ls-devices', 'lsdev', 'list-usb', 'list-devices'])
@click.option('--target', type=click.Choice(['v5', 'cortex']), default=None, required=False)
@default_options
def ls_usb(target):
"""
List plugged in VEX Devices
"""
analytics.send("ls-usb")
from pros.serial.devices.vex import find_v5_ports, find_cortex_ports
class PortReport(object):
def __init__(self, header: str, ports: List[Any], machine_header: Optional[str] = None):
self.header = header
self.ports = [{'device': p.device, 'desc': p.description} for p in ports]
self.machine_header = machine_header or header
def __getstate__(self):
return {
'device_type': self.machine_header,
'devices': self.ports
}
def __str__(self):
if len(self.ports) == 0:
return f'There are no connected {self.header}'
else:
port_str = "\n".join([f"{p['device']} - {p['desc']}" for p in self.ports])
return f'{self.header}:\n{port_str}'
result = []
if target == 'v5' or target is None:
ports = find_v5_ports('system')
result.append(PortReport('VEX EDR V5 System Ports', ports, 'v5/system'))
ports = find_v5_ports('User')
result.append(PortReport('VEX EDR V5 User ports', ports, 'v5/user'))
if target == 'cortex' or target is None:
ports = find_cortex_ports()
result.append(PortReport('VEX EDR Cortex Microcontroller Ports', ports, 'cortex'))
ui.finalize('lsusb', result)
@upload_cli.command('upload-terminal', aliases=['ut'], hidden=True)
@shadow_command(upload)
@click.pass_context
def make_upload_terminal(ctx, **upload_kwargs):
analytics.send("upload-terminal")
from .terminal import terminal
ctx.invoke(upload, **upload_kwargs)
ctx.invoke(terminal, request_banner=False)
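# Illustrative usage sketch (not part of the original module); the project path, binary,
# port, and option values below are hypothetical:
#   pros upload ./my-project --slot 2 --after run
#   pros upload firmware.bin /dev/ttyACM0 --target cortex
#   pros lsusb --target v5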
|
mpl-2.0
|
7f70578b1e365d28c5544b96ec43866a
| 45.5625
| 143
| 0.62096
| 3.699389
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/common/utils.py
|
1
|
4701
|
import logging
import os
import os.path
import sys
from functools import lru_cache, wraps
from typing import *
import click
import pros
@lru_cache(1)
def get_version():
try:
ver = open(os.path.join(os.path.dirname(__file__), '..', '..', 'version')).read().strip()
if ver is not None:
return ver
except:
pass
try:
if getattr(sys, 'frozen', False):
import _constants
ver = _constants.CLI_VERSION
if ver is not None:
return ver
except:
pass
try:
import pkg_resources
except ImportError:
pass
else:
import pros.cli.main
module = pros.cli.main.__name__
for dist in pkg_resources.working_set:
scripts = dist.get_entry_map().get('console_scripts') or {}
for script_name, entry_point in iter(scripts.items()):
if entry_point.module_name == module:
ver = dist.version
if ver is not None:
return ver
raise RuntimeError('Could not determine version')
def retries(func, retry: int = 3):
@wraps(func)
def retries_wrapper(*args, n_retries: int = retry, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if n_retries > 0:
return retries_wrapper(*args, n_retries=n_retries - 1, **kwargs)
else:
raise e
return retries_wrapper
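# Illustrative sketch (not part of the original module): retries() is applied as a plain
# decorator; the wrapped call is retried up to three more times before the last exception
# is re-raised. The function below is hypothetical.
@retries
def _example_flaky_read(path):
    with open(path) as f:
        return f.read()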
def logger(obj: Union[str, object] = pros.__name__) -> logging.Logger:
if isinstance(obj, str):
return logging.getLogger(obj)
return logging.getLogger(obj.__module__)
def isdebug(obj: Union[str, object] = pros.__name__) -> bool:
if obj is None:
obj = pros.__name__
if isinstance(obj, str):
return logging.getLogger(obj).getEffectiveLevel() == logging.DEBUG
return logging.getLogger(obj.__module__).getEffectiveLevel() == logging.DEBUG
def ismachineoutput(ctx: click.Context = None) -> bool:
if ctx is None:
ctx = click.get_current_context(silent=True)
if isinstance(ctx, click.Context):
ctx.ensure_object(dict)
assert isinstance(ctx.obj, dict)
return ctx.obj.get('machine_output', False)
else:
return False
def get_pros_dir():
return click.get_app_dir('PROS')
def with_click_context(func):
ctx = click.get_current_context(silent=True)
if not ctx or not isinstance(ctx, click.Context):
return func
else:
def _wrap(*args, **kwargs):
with ctx:
try:
return func(*args, **kwargs)
except BaseException as e:
logger(__name__).exception(e)
return _wrap
def download_file(url: str, ext: Optional[str] = None, desc: Optional[str] = None) -> Optional[str]:
"""
Helper method to download a temporary file.
:param url: URL of the file to download
:param ext: Expected extension of the file to be downloaded
:param desc: Description of file being downloaded (for progressbar)
:return: The path of the downloaded file, or None if there was an error
"""
import requests
from pros.common.ui import progressbar
# from rfc6266_parser import parse_requests_response
import re
response = requests.get(url, stream=True)
if response.status_code == 200:
filename: str = url.rsplit('/', 1)[-1]
if 'Content-Disposition' in response.headers.keys():
filename = re.findall("filename=(.+)", response.headers['Content-Disposition'])[0]
# try:
# disposition = parse_requests_response(response)
# if isinstance(ext, str):
# filename = disposition.filename_sanitized(ext)
# else:
# filename = disposition.filename_unsafe
# except RuntimeError:
# pass
output_path = os.path.join(get_pros_dir(), 'download', filename)
if os.path.exists(output_path):
os.remove(output_path)
elif not os.path.exists(os.path.dirname(output_path)):
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, mode='wb') as file:
with progressbar(length=int(response.headers['Content-Length']),
label=desc or f'Downloading {filename}') as pb:
for chunk in response.iter_content(256):
file.write(chunk)
pb.update(len(chunk))
return output_path
return None
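# Illustrative usage sketch (not part of the original module): fetch a file into the PROS
# download cache. The URL is hypothetical and the call performs a real HTTP request, so it
# is wrapped in a helper rather than executed at import time.
def _example_download(url='https://example.com/kernel-template.zip'):
    archive = download_file(url, ext='zip', desc='Downloading example template')
    return archive    # path under get_pros_dir()/download, or None on error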
def dont_send(e: Exception):
e.sentry = False
return e
|
mpl-2.0
|
d9f2c83cf71dc20ef925644ce437568d
| 30.550336
| 100
| 0.582004
| 4.087826
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/conductor/interactive/components.py
|
1
|
1846
|
from collections import defaultdict
from typing import *
from pros.common.ui.interactive import components, parameters
from pros.conductor.interactive.parameters import TemplateParameter
class TemplateListingComponent(components.Container):
def _generate_components(self) -> Generator[components.Component, None, None]:
if not self.editable['name'] and not self.editable['version']:
yield components.Label(self.template.value.identifier)
else:
if self.editable['name']:
yield components.InputBox('Name', self.template.name)
else:
yield components.Label(self.template.value.name)
if self.editable['version']:
if isinstance(self.template.version, parameters.OptionParameter):
yield components.DropDownBox('Version', self.template.version)
else:
yield components.InputBox('Version', self.template.version)
else:
yield components.Label(self.template.value.version)
if self.removable:
remove_button = components.Button('Don\'t remove' if self.template.removed else 'Remove')
remove_button.on_clicked(lambda: self.template.trigger('removed'))
yield remove_button
def __init__(self, template: TemplateParameter,
removable: bool = False,
editable: Union[Dict[str, bool], bool] = True):
self.template = template
self.removable = removable
if isinstance(editable, bool):
self.editable = defaultdict(lambda: editable)
else:
self.editable = defaultdict(lambda: False)
if isinstance(editable, dict):
self.editable.update(**editable)
super().__init__(*self._generate_components())
|
mpl-2.0
|
f70a6628f22ede8ae9d2d4be54677e33
| 44.02439
| 101
| 0.631636
| 4.770026
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/conductor/depots/http_depot.py
|
1
|
1585
|
import os
import zipfile
from datetime import datetime
import jsonpickle
import pros.common.ui as ui
from pros.common import logger
from pros.common.utils import download_file
from .depot import Depot
from ..templates import BaseTemplate, ExternalTemplate
class HttpDepot(Depot):
def __init__(self, name: str, location: str):
super().__init__(name, location, config_schema={})
def fetch_template(self, template: BaseTemplate, destination: str, **kwargs):
import requests
assert 'location' in template.metadata
url = template.metadata['location']
tf = download_file(url, ext='zip', desc=f'Downloading {template.identifier}')
if tf is None:
raise requests.ConnectionError(f'Could not obtain {url}')
with zipfile.ZipFile(tf) as zf:
with ui.progressbar(length=len(zf.namelist()),
label=f'Extracting {template.identifier}') as pb:
for file in zf.namelist():
zf.extract(file, path=destination)
pb.update(1)
os.remove(tf)
return ExternalTemplate(file=os.path.join(destination, 'template.pros'))
def update_remote_templates(self, **_):
import requests
response = requests.get(self.location)
if response.status_code == 200:
self.remote_templates = jsonpickle.decode(response.text)
else:
logger(__name__).warning(f'Unable to access {self.name} ({self.location}): {response.status_code}')
self.last_remote_update = datetime.now()
|
mpl-2.0
|
97273cc19256294b290f2fcb8392b46b
| 37.658537
| 111
| 0.639117
| 4.193122
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/conductor/conductor.py
|
1
|
11642
|
import os.path
import shutil
from pathlib import Path
from typing import *
import click
from semantic_version import Spec, Version
from pros.common import *
from pros.conductor.project import TemplateAction
from pros.conductor.project.template_resolution import InvalidTemplateException
from pros.config import Config
from .depots import Depot, HttpDepot
from .project import Project
from .templates import BaseTemplate, ExternalTemplate, LocalTemplate, Template
MAINLINE_NAME = 'pros-mainline'
MAINLINE_URL = 'https://purduesigbots.github.io/pros-mainline/pros-mainline.json'
class Conductor(Config):
"""
Provides entrances for all conductor-related tasks (fetching, applying, creating new projects)
"""
def __init__(self, file=None):
if not file:
file = os.path.join(click.get_app_dir('PROS'), 'conductor.pros')
self.local_templates: Set[LocalTemplate] = set()
self.depots: Dict[str, Depot] = {}
self.default_target: str = 'v5'
self.default_libraries: Dict[str, List[str]] = None
super(Conductor, self).__init__(file)
needs_saving = False
if MAINLINE_NAME not in self.depots or \
not isinstance(self.depots[MAINLINE_NAME], HttpDepot) or \
self.depots[MAINLINE_NAME].location != MAINLINE_URL:
self.depots[MAINLINE_NAME] = HttpDepot(MAINLINE_NAME, MAINLINE_URL)
needs_saving = True
if self.default_target is None:
self.default_target = 'v5'
needs_saving = True
if self.default_libraries is None:
self.default_libraries = {
'v5': ['okapilib'],
'cortex': []
}
needs_saving = True
if 'v5' not in self.default_libraries:
self.default_libraries['v5'] = []
needs_saving = True
if 'cortex' not in self.default_libraries:
self.default_libraries['cortex'] = []
needs_saving = True
if needs_saving:
self.save()
from pros.common.sentry import add_context
add_context(self)
def get_depot(self, name: str) -> Optional[Depot]:
return self.depots.get(name)
def fetch_template(self, depot: Depot, template: BaseTemplate, **kwargs) -> LocalTemplate:
for t in list(self.local_templates):
if t.identifier == template.identifier:
self.purge_template(t)
if 'destination' in kwargs: # this is deprecated, will work (maybe) but not desirable behavior
destination = kwargs.pop('destination')
else:
destination = os.path.join(self.directory, 'templates', template.identifier)
if os.path.isdir(destination):
shutil.rmtree(destination)
template: Template = depot.fetch_template(template, destination, **kwargs)
click.secho(f'Fetched {template.identifier} from {depot.name} depot', dim=True)
local_template = LocalTemplate(orig=template, location=destination)
local_template.metadata['origin'] = depot.name
click.echo(f'Adding {local_template.identifier} to registry...', nl=False)
self.local_templates.add(local_template)
self.save()
if isinstance(template, ExternalTemplate) and template.directory == destination:
template.delete()
click.secho('Done', fg='green')
return local_template
def purge_template(self, template: LocalTemplate):
if template not in self.local_templates:
logger(__name__).info(f"{template.identifier} was not in the Conductor's local templates cache.")
else:
self.local_templates.remove(template)
if os.path.abspath(template.location).startswith(
os.path.abspath(os.path.join(self.directory, 'templates'))) \
and os.path.isdir(template.location):
shutil.rmtree(template.location)
self.save()
def resolve_templates(self, identifier: Union[str, BaseTemplate], allow_online: bool = True,
allow_offline: bool = True, force_refresh: bool = False,
unique: bool = True, **kwargs) -> List[BaseTemplate]:
results = list() if not unique else set()
kernel_version = kwargs.get('kernel_version', None)
if isinstance(identifier, str):
query = BaseTemplate.create_query(name=identifier, **kwargs)
else:
query = identifier
if allow_offline:
offline_results = filter(lambda t: t.satisfies(query, kernel_version=kernel_version), self.local_templates)
if unique:
results.update(offline_results)
else:
results.extend(offline_results)
if allow_online:
for depot in self.depots.values():
online_results = filter(lambda t: t.satisfies(query, kernel_version=kernel_version),
depot.get_remote_templates(force_check=force_refresh, **kwargs))
if unique:
results.update(online_results)
else:
results.extend(online_results)
logger(__name__).debug('Saving Conductor config after checking for remote updates')
self.save() # Save self since there may have been some updates from the depots
return list(results)
def resolve_template(self, identifier: Union[str, BaseTemplate], **kwargs) -> Optional[BaseTemplate]:
if isinstance(identifier, str):
kwargs['name'] = identifier
elif isinstance(identifier, BaseTemplate):
kwargs['orig'] = identifier
query = BaseTemplate.create_query(**kwargs)
logger(__name__).info(f'Query: {query}')
logger(__name__).debug(query.__dict__)
templates = self.resolve_templates(query, **kwargs)
logger(__name__).info(f'Candidates: {", ".join([str(t) for t in templates])}')
if not any(templates):
return None
query.version = str(Spec(query.version or '>0').select([Version(t.version) for t in templates]))
v = Version(query.version)
v.prerelease = v.prerelease if len(v.prerelease) else ('',)
v.build = v.build if len(v.build) else ('',)
query.version = f'=={v}'
logger(__name__).info(f'Resolved to {query.identifier}')
templates = self.resolve_templates(query, **kwargs)
if not any(templates):
return None
# prefer local templates first
local_templates = [t for t in templates if isinstance(t, LocalTemplate)]
if any(local_templates):
# there's a local template satisfying the query
if len(local_templates) > 1:
# This should never happen! Conductor state must be invalid
raise Exception(f'Multiple local templates satisfy {query.identifier}!')
return [t for t in templates if isinstance(t, LocalTemplate)][0]
# prefer pros-mainline template second
mainline_templates = [t for t in templates if t.metadata['origin'] == 'pros-mainline']
if any(mainline_templates):
return mainline_templates[0]
# No preference, just FCFS
return templates[0]
def apply_template(self, project: Project, identifier: Union[str, BaseTemplate], **kwargs):
upgrade_ok = kwargs.get('upgrade_ok', True)
install_ok = kwargs.get('install_ok', True)
downgrade_ok = kwargs.get('downgrade_ok', True)
download_ok = kwargs.get('download_ok', True)
force = kwargs.get('force_apply', False)
kwargs['target'] = project.target
if 'kernel' in project.templates:
            # supported_kernels for backwards compatibility, but kernel_version should be getting most of the exposure
kwargs['kernel_version'] = kwargs['supported_kernels'] = project.templates['kernel'].version
template = self.resolve_template(identifier=identifier, allow_online=download_ok, **kwargs)
if template is None:
raise dont_send(
InvalidTemplateException(f'Could not find a template satisfying {identifier} for {project.target}'))
if not isinstance(template, LocalTemplate):
with ui.Notification():
template = self.fetch_template(self.get_depot(template.metadata['origin']), template, **kwargs)
assert isinstance(template, LocalTemplate)
logger(__name__).info(str(project))
valid_action = project.get_template_actions(template)
if valid_action == TemplateAction.NotApplicable:
raise dont_send(
InvalidTemplateException(f'{template.identifier} is not applicable to {project}', reason=valid_action)
)
if force \
or (valid_action == TemplateAction.Upgradable and upgrade_ok) \
or (valid_action == TemplateAction.Installable and install_ok) \
or (valid_action == TemplateAction.Downgradable and downgrade_ok):
project.apply_template(template, force_system=kwargs.pop('force_system', False),
force_user=kwargs.pop('force_user', False),
remove_empty_directories=kwargs.pop('remove_empty_directories', False))
ui.finalize('apply', f'Finished applying {template.identifier} to {project.location}')
else:
raise dont_send(
InvalidTemplateException(f'Could not install {template.identifier} because it is {valid_action.name},'
f' and that is not allowed.', reason=valid_action)
)
@staticmethod
def remove_template(project: Project, identifier: Union[str, BaseTemplate], remove_user: bool = True,
remove_empty_directories: bool = True):
ui.logger(__name__).debug(f'Uninstalling templates matching {identifier}')
for template in project.resolve_template(identifier):
ui.echo(f'Uninstalling {template.identifier}')
project.remove_template(template, remove_user=remove_user,
remove_empty_directories=remove_empty_directories)
def new_project(self, path: str, no_default_libs: bool = False, **kwargs) -> Project:
if Path(path).exists() and Path(path).samefile(os.path.expanduser('~')):
raise dont_send(ValueError('Will not create a project in user home directory'))
proj = Project(path=path, create=True)
if 'target' in kwargs:
proj.target = kwargs['target']
if 'project_name' in kwargs and kwargs['project_name'] and not kwargs['project_name'].isspace():
proj.project_name = kwargs['project_name']
else:
proj.project_name = os.path.basename(os.path.normpath(os.path.abspath(path)))
if 'version' in kwargs:
if kwargs['version'] == 'latest':
kwargs['version'] = '>=0'
self.apply_template(proj, identifier='kernel', **kwargs)
proj.save()
if not no_default_libs:
for library in self.default_libraries[proj.target]:
try:
# remove kernel version so that latest template satisfying query is correctly selected
if 'version' in kwargs:
kwargs.pop('version')
self.apply_template(proj, library, **kwargs)
except Exception as e:
logger(__name__).exception(e)
return proj
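# Illustrative sketch (not part of the original module): typical programmatic use of
# Conductor to create a project, mirroring what the CLI does. The path is hypothetical and
# the call needs network and filesystem access, so it is only wrapped in a helper here.
def _example_new_project(path='/tmp/example-pros-project'):
    conductor = Conductor()
    return conductor.new_project(path, target='v5', version='latest')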
|
mpl-2.0
|
6c24790d24a1216a6b8b79b730aeee0a
| 47.508333
| 119
| 0.613812
| 4.313449
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/conductor/project/ProjectTransaction.py
|
1
|
7498
|
import itertools as it
import os
import tempfile
import zipfile
from typing import *
import pros.common.ui as ui
import pros.conductor as c
from pros.conductor.project.template_resolution import InvalidTemplateException, TemplateAction
class Action(object):
def execute(self, conductor: c.Conductor, project: c.Project) -> None:
raise NotImplementedError()
def describe(self, conductor: c.Conductor, project: c.Project) -> str:
raise NotImplementedError()
def can_execute(self, conductor: c.Conductor, project: c.Project) -> bool:
raise NotImplementedError()
class ApplyTemplateAction(Action):
def __init__(self, template: c.BaseTemplate, apply_kwargs: Dict[str, Any] = None,
suppress_already_installed: bool = False):
self.template = template
self.apply_kwargs = apply_kwargs or {}
self.suppress_already_installed = suppress_already_installed
def execute(self, conductor: c.Conductor, project: c.Project):
try:
conductor.apply_template(project, self.template, **self.apply_kwargs)
except InvalidTemplateException as e:
if e.reason != TemplateAction.AlreadyInstalled or not self.suppress_already_installed:
raise e
else:
ui.logger(__name__).warning(str(e))
return None
def describe(self, conductor: c.Conductor, project: c.Project):
action = project.get_template_actions(conductor.resolve_template(self.template))
if action == TemplateAction.NotApplicable:
return f'{self.template.identifier} cannot be applied to project!'
if action == TemplateAction.Installable:
            return f'{self.template.identifier} will be installed to project.'
if action == TemplateAction.Downgradable:
return f'Project will be downgraded to {self.template.identifier} from' \
f' {project.templates[self.template.name].version}.'
if action == TemplateAction.Upgradable:
return f'Project will be upgraded to {self.template.identifier} from' \
f' {project.templates[self.template.name].version}.'
if action == TemplateAction.AlreadyInstalled:
if self.apply_kwargs.get('force_apply'):
return f'{self.template.identifier} will be re-applied.'
elif self.suppress_already_installed:
return f'{self.template.identifier} will not be re-applied.'
else:
return f'{self.template.identifier} cannot be applied to project because it is already installed.'
def can_execute(self, conductor: c.Conductor, project: c.Project) -> bool:
action = project.get_template_actions(conductor.resolve_template(self.template))
if action == TemplateAction.AlreadyInstalled:
return self.apply_kwargs.get('force_apply') or self.suppress_already_installed
return action in [TemplateAction.Installable, TemplateAction.Downgradable, TemplateAction.Upgradable]
class RemoveTemplateAction(Action):
def __init__(self, template: c.BaseTemplate, remove_kwargs: Dict[str, Any] = None,
suppress_not_removable: bool = False):
self.template = template
self.remove_kwargs = remove_kwargs or {}
self.suppress_not_removable = suppress_not_removable
def execute(self, conductor: c.Conductor, project: c.Project):
try:
conductor.remove_template(project, self.template, **self.remove_kwargs)
except ValueError as e:
if not self.suppress_not_removable:
raise e
else:
ui.logger(__name__).warning(str(e))
def describe(self, conductor: c.Conductor, project: c.Project) -> str:
return f'{self.template.identifier} will be removed'
def can_execute(self, conductor: c.Conductor, project: c.Project):
return True
class ChangeProjectNameAction(Action):
def __init__(self, new_name: str):
self.new_name = new_name
def execute(self, conductor: c.Conductor, project: c.Project):
project.project_name = self.new_name
project.save()
def describe(self, conductor: c.Conductor, project: c.Project):
return f'Project will be renamed to: "{self.new_name}"'
def can_execute(self, conductor: c.Conductor, project: c.Project):
return True
class ProjectTransaction(object):
def __init__(self, project: c.Project, conductor: Optional[c.Conductor] = None):
self.project = project
self.conductor = conductor or c.Conductor()
self.actions: List[Action] = []
def add_action(self, action: Action) -> None:
self.actions.append(action)
def execute(self):
if len(self.actions) == 0:
ui.logger(__name__).warning('No actions necessary.')
return
location = self.project.location
tfd, tfn = tempfile.mkstemp(prefix='pros-project-', suffix=f'-{self.project.name}.zip', text='w+b')
with os.fdopen(tfd, 'w+b') as tf:
with zipfile.ZipFile(tf, mode='w') as zf:
files, length = it.tee(location.glob('**/*'), 2)
length = len(list(length))
with ui.progressbar(files, length=length, label=f'Backing up {self.project.name} to {tfn}') as pb:
for file in pb:
zf.write(file, arcname=file.relative_to(location))
try:
with ui.Notification():
for action in self.actions:
ui.logger(__name__).debug(action.describe(self.conductor, self.project))
rv = action.execute(self.conductor, self.project)
ui.logger(__name__).debug(f'{action} returned {rv}')
if rv is not None and not rv:
raise ValueError('Action did not complete successfully')
ui.echo('All actions performed successfully')
except Exception as e:
ui.logger(__name__).warning(f'Failed to perform transaction, restoring project to previous state')
with zipfile.ZipFile(tfn) as zf:
with ui.progressbar(zf.namelist(), label=f'Restoring {self.project.name} from {tfn}') as pb:
for file in pb:
zf.extract(file, path=location)
ui.logger(__name__).exception(e)
finally:
ui.echo(f'Removing {tfn}')
os.remove(tfn)
def apply_template(self, template: c.BaseTemplate, suppress_already_installed: bool = False, **kwargs):
self.add_action(
ApplyTemplateAction(template, suppress_already_installed=suppress_already_installed, apply_kwargs=kwargs)
)
def rm_template(self, template: c.BaseTemplate, suppress_not_removable: bool = False, **kwargs):
self.add_action(
RemoveTemplateAction(template, suppress_not_removable=suppress_not_removable, remove_kwargs=kwargs)
)
def change_name(self, new_name: str):
self.add_action(ChangeProjectNameAction(new_name))
def describe(self) -> str:
if len(self.actions) > 0:
return '\n'.join(
f'- {a.describe(self.conductor, self.project)}'
for a in self.actions
)
else:
return 'No actions necessary.'
def can_execute(self) -> bool:
return all(a.can_execute(self.conductor, self.project) for a in self.actions)
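# Illustrative sketch (not part of the original module): composing and running a transaction.
# It assumes an existing PROS project at the hypothetical path; execute() backs the project up
# to a temporary zip, applies each queued action, and restores the backup if anything fails.
def _example_transaction(project_path='/tmp/example-pros-project'):
    project = c.Project(path=project_path)
    txn = ProjectTransaction(project)
    txn.apply_template(c.BaseTemplate.create_query(name='okapilib'), suppress_already_installed=True)
    txn.change_name('renamed-project')
    if txn.can_execute():
        txn.execute()
    return txn.describe()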
|
mpl-2.0
|
c43844cf4d763cc35dfab833c72655b2
| 42.091954
| 117
| 0.630168
| 4.09503
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/cli/interactive.py
|
1
|
1454
|
import os
from typing import *
import click
import pros.conductor as c
from .common import PROSGroup, default_options, project_option, pros_root
from pros.ga.analytics import analytics
@pros_root
def interactive_cli():
pass
@interactive_cli.group(cls=PROSGroup, hidden=True)
@default_options
def interactive():
pass
@interactive.command()
@click.option('--directory', default=os.path.join(os.path.expanduser('~'), 'My PROS Project'))
@default_options
def new_project(directory):
from pros.common.ui.interactive.renderers import MachineOutputRenderer
from pros.conductor.interactive.NewProjectModal import NewProjectModal
app = NewProjectModal(directory=directory)
MachineOutputRenderer(app).run()
@interactive.command()
@project_option(required=False, default=None, allow_none=True)
@default_options
def update_project(project: Optional[c.Project]):
from pros.common.ui.interactive.renderers import MachineOutputRenderer
from pros.conductor.interactive.UpdateProjectModal import UpdateProjectModal
app = UpdateProjectModal(project)
MachineOutputRenderer(app).run()
@interactive.command()
@project_option(required=False, default=None, allow_none=True)
@default_options
def upload(project: Optional[c.Project]):
from pros.common.ui.interactive.renderers import MachineOutputRenderer
from pros.serial.interactive import UploadProjectModal
MachineOutputRenderer(UploadProjectModal(project)).run()
|
mpl-2.0
|
8e9a06907720c1cb3eeb389d996707cb
| 31.311111
| 94
| 0.788858
| 3.826316
| false
| false
| false
| false
|
requests/requests-oauthlib
|
docs/conf.py
|
1
|
8699
|
# -*- coding: utf-8 -*-
#
# Requests-OAuthlib documentation build configuration file, created by
# sphinx-quickstart on Fri May 10 11:49:01 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
from requests_oauthlib import __version__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Requests-OAuthlib"
copyright = "2014, Kenneth Reitz"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Requests-OAuthlibdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"Requests-OAuthlib.tex",
"Requests-OAuthlib Documentation",
"Requests-OAuthlib Contributors",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"requests-oauthlib",
"Requests-OAuthlib Documentation",
["Requests-OAuthlib Contributors"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Requests-OAuthlib",
"Requests-OAuthlib Documentation",
"Requests-OAuthlib Contributors",
"Requests-OAuthlib",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.io/en/latest/", None),
"requests": ("https://requests.readthedocs.io/en/latest/", None),
"oauthlib": ("https://oauthlib.readthedocs.io/en/latest/", None),
}
|
isc
|
c10cdefc2677aeb20d4f0dfdb95ae7d3
| 31.099631
| 80
| 0.692034
| 3.85765
| false
| true
| false
| false
|
eregs/regulations-parser
|
regparser/tree/depth/heuristics.py
|
3
|
3172
|
"""Set of heuristics for trimming down the set of solutions. Each heuristic
works by penalizing a solution; it's then up to the caller to grab the
solution with the least penalties."""
from collections import defaultdict
from itertools import takewhile
from regparser.tree.depth import markers
def prefer_multiple_children(solutions, weight=1.0):
"""Dock solutions which have a paragraph with exactly one child. While
this is possible, it's unlikely."""
result = []
for solution in solutions:
flags = 0
depths = [a.depth for a in solution.assignment]
for i, depth in enumerate(depths):
child_depths = takewhile(lambda d: d > depth, depths[i + 1:])
matching_depths = [d for d in child_depths if d == depth + 1]
if len(matching_depths) == 1:
flags += 1
result.append(solution.copy_with_penalty(weight * flags / len(depths)))
return result
def prefer_diff_types_diff_levels(solutions, weight=1.0):
"""Dock solutions which have different markers appearing at the same
level. This also occurs, but not often."""
result = []
for solution in solutions:
depth_types = defaultdict(set)
for par in solution.assignment:
depth_types[par.depth].add(par.typ)
flags, total = 0, 0
for types in depth_types.values():
total += len(types)
flags += len(types) - 1
result.append(solution.copy_with_penalty(weight * flags / total))
return result
def prefer_shallow_depths(solutions, weight=0.1):
"""Dock solutions which have a higher maximum depth"""
# Smallest maximum depth across solutions
min_max_depth = min(max(p.depth for p in s.assignment) for s in solutions)
max_max_depth = max(p.depth for s in solutions for p in s.assignment)
variance = max_max_depth - min_max_depth
if variance:
result = []
for solution in solutions:
max_depth = max(p.depth for p in solution.assignment)
flags = max_depth - min_max_depth
result.append(solution.copy_with_penalty(
weight * flags / variance))
return result
else:
return solutions
def prefer_no_markerless_sandwich(solutions, weight=1.0):
"""Prefer solutions which don't use MARKERLESS to switch depth, like
a
MARKERLESS
a
"""
result = []
for solution in solutions:
flags = 0
for idx in range(2, len(solution.assignment)):
pprev_depth = solution.assignment[idx - 2].depth
prev_typ = solution.assignment[idx - 1].typ
prev_depth = solution.assignment[idx - 1].depth
depth = solution.assignment[idx].depth
sandwich = prev_typ == markers.markerless
incremented = depth == prev_depth + 1
incrementing = prev_depth == pprev_depth + 1
if sandwich and incremented and incrementing:
flags += 1
total = len(solution.assignment)
result.append(solution.copy_with_penalty(
weight * flags / float(total)))
return result
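A minimal sketch, not part of the original module, of how a caller might apply one of these penalty heuristics and keep the least-penalized candidate; the Assignment and Solution stand-ins below are hypothetical simplifications of the objects the real depth solver supplies.
from collections import namedtuple

# Hypothetical stand-in types, only for illustration.
Assignment = namedtuple('Assignment', ['typ', 'depth'])

class Solution(object):
    def __init__(self, assignment, penalty=0.0):
        self.assignment = assignment
        self.penalty = penalty

    def copy_with_penalty(self, penalty):
        # Accumulate penalties; mirrors the interface the heuristics expect.
        return Solution(self.assignment, self.penalty + penalty)

flat = Solution([Assignment('lower', 0), Assignment('lower', 0)])
deep = Solution([Assignment('lower', 0), Assignment('lower', 1)])

# prefer_shallow_depths docks the deeper candidate; the caller keeps the cheapest one.
best = min(prefer_shallow_depths([flat, deep]), key=lambda s: s.penalty)
assert best.penalty == 0.0 and max(a.depth for a in best.assignment) == 0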
|
cc0-1.0
|
912ffab79acf6b211a10f0b7eccf9fb3
| 35.045455
| 79
| 0.625158
| 4.092903
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/notice/build.py
|
3
|
4798
|
import logging
from lxml import etree
from regparser.grammar.unified import notice_cfr_p
from regparser.notice.amendments.fetch import fetch_amendments
from regparser.notice.dates import fetch_dates
from regparser.notice.sxs import (build_section_by_section,
find_section_by_section)
from regparser.notice.util import spaces_then_remove, swap_emphasis_tags
from regparser.notice.xml import xmls_for_url
logger = logging.getLogger(__name__)
def build_notice(cfr_title, cfr_part, fr_notice, fetch_xml=True,
xml_to_process=None):
"""Given JSON from the federal register, create our notice structure"""
cfr_parts = {str(ref['part']) for ref in fr_notice['cfr_references']}
if cfr_part:
cfr_parts.add(cfr_part)
notice = {'cfr_title': cfr_title, 'cfr_parts': list(cfr_parts)}
# Copy over most fields
for field in ['comments_close_on', 'document_number', 'publication_date',
'regulation_id_numbers']:
if fr_notice[field]:
notice[field] = fr_notice[field]
if fr_notice['effective_on']:
notice['effective_on'] = fr_notice['effective_on']
notice['initial_effective_on'] = fr_notice['effective_on']
if fr_notice['html_url']:
notice['fr_url'] = fr_notice['html_url']
if fr_notice['citation']:
notice['fr_citation'] = fr_notice['citation']
notice['fr_volume'] = fr_notice['volume']
notice['meta'] = {}
for key in ('dates', 'end_page', 'start_page', 'type'):
notice['meta'][key] = fr_notice[key]
if xml_to_process is not None:
return [process_xml(notice, xml_to_process)]
elif fr_notice['full_text_xml_url'] and fetch_xml:
xmls = xmls_for_url(fr_notice['full_text_xml_url'])
notices = [process_xml(notice, xml) for xml in xmls]
set_document_numbers(notices)
return notices
return [notice]
def split_doc_num(doc_num, effective_date):
""" If we have a split notice, we construct a document number
based on the original document number and the effective date. """
effective_date = ''.join(effective_date.split('-'))
return '{0}_{1}'.format(doc_num, effective_date)
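# Illustrative example (document number and date hypothetical): a notice split
# on one of its effective dates gets that date appended to its document number:
#     split_doc_num('2016-12345', '2016-02-01')  ->  '2016-12345_20160201'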
def set_document_numbers(notices):
"""If we have multiple notices (due to being split across multiple
effective dates,) we need to fix their document numbers."""
if len(notices) > 1:
for notice in notices:
notice['document_number'] = split_doc_num(
notice['document_number'], notice['effective_on'])
return notices
def process_sxs(notice, notice_xml):
""" Find and build SXS from the notice_xml. """
sxs = find_section_by_section(notice_xml)
# note we will continue to use cfr_parts[0] as the default SxS label until
# we find a counter example
sxs = build_section_by_section(sxs, notice['meta']['start_page'],
notice['cfr_parts'][0])
notice['section_by_section'] = sxs
# @todo - this can be deleted once we remove process_xml
def fetch_cfr_parts(notice_xml):
""" Sometimes we need to read the CFR part numbers from the notice
XML itself. This would need to happen when we've broken up a
multiple-effective-date notice that has multiple CFR parts that
may not be included in each date. """
parts = []
for cfr_elm in notice_xml.xpath('//CFR'):
parts.extend(notice_cfr_p.parseString(cfr_elm.text).cfr_parts)
return list(sorted(set(parts)))
def process_xml(notice, notice_xml):
"""Pull out relevant fields from the xml and add them to the notice"""
notice = dict(notice) # defensive copy
if not notice.get('effective_on'):
dates = fetch_dates(notice_xml)
if dates and 'effective' in dates:
notice['effective_on'] = dates['effective'][0]
if not notice.get('cfr_parts'):
cfr_parts = fetch_cfr_parts(notice_xml)
notice['cfr_parts'] = cfr_parts
process_sxs(notice, notice_xml)
amds = fetch_amendments(notice_xml)
if amds:
notice['amendments'] = amds
add_footnotes(notice, notice_xml)
return notice
def add_footnotes(notice, notice_xml):
""" Parse the notice xml for footnotes and add them to the notice. """
notice['footnotes'] = {}
for child in notice_xml.xpath('//FTNT/*'):
spaces_then_remove(child, 'PRTPAGE')
swap_emphasis_tags(child)
ref = child.xpath('.//SU')
if ref:
child.text = ref[0].tail
child.remove(ref[0])
content = child.text
for cc in child:
content += etree.tounicode(cc)
if child.tail:
content += child.tail
notice['footnotes'][ref[0].text] = content.strip()
|
cc0-1.0
|
52609f538c659550642b043ee8501ce4
| 35.348485
| 78
| 0.630054
| 3.610233
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/web/settings/base.py
|
3
|
3882
|
"""
Django settings for regparser.web project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
from django.utils.crypto import get_random_string
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', get_random_string(50))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_rq',
'regparser.web',
'regparser.web.jobs',
'regparser.web.index',
'rest_framework'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'regparser.web.urls'
TEMPLATES = [
# @todo - probably want to switch this to jinja before writing any
# templates
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'regparser.web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
# Read database config from the DATABASE_URL env var. See
# https://github.com/kennethreitz/dj-database-url#url-schema
'default': dj_database_url.config(
default='sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3'))
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
_prefix = 'django.contrib.auth.password_validation.'
AUTH_PASSWORD_VALIDATORS = [
{'NAME': _prefix + 'UserAttributeSimilarityValidator'},
{'NAME': _prefix + 'MinimumLengthValidator'},
{'NAME': _prefix + 'CommonPasswordValidator'},
{'NAME': _prefix + 'NumericPasswordValidator'},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
RQ_QUEUES = {
'default': {
'HOST': 'localhost',
'PORT': 6379,
'DB': 0
},
}
EREGS_INDEX_ROOT = os.environ.get('EREGS_CACHE_DIR', '.eregs_index')
REQUESTS_CACHE = {
'backend': 'sqlite',
'cache_name': os.path.join(EREGS_INDEX_ROOT, 'http_cache'),
'expire_after': 60 * 60 * 24 * 3 # 3 days
}
FILE_UPLOAD_HANDLERS = [
"django.core.files.uploadhandler.TemporaryFileUploadHandler"]
|
cc0-1.0
|
c57a75a22eba2b638cfc56a6b4346793
| 25.958333
| 78
| 0.685987
| 3.47538
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/commands/preprocess_notice.py
|
3
|
3678
|
import click
from regparser import federalregister
from regparser.commands.dependency_resolver import DependencyResolver
from regparser.index import entry
from regparser.notice.build import split_doc_num
from regparser.notice.xml import TitlePartsRef, notice_xmls_for_url
def convert_cfr_refs(refs=None):
"""
Get the references to CFR titles and parts out of the metadata.
Return a list of TitlePartsRef objects grouped by title and
sorted, for example::
[{"title": 0, "part": 23}, {"title": 0, "part": 17}]
Becomes::
[TitlePartsRef(title="0", parts=["17", "23"])]
:arg list refs: The list of title/part pairs; if empty, will create an
empty ``EREGS_CFR_REFS`` element and return an empty list.
:rtype: list
:returns: Grouped & sorted list.
"""
refs = refs or []
# Group parts by title:
refd = {r["title"]: [] for r in refs}
for ref in refs:
refd[ref["title"]].append(ref["part"])
refs = [{u"title": k, "parts": refd[k]} for k in refd]
# Sort parts and sort list by title:
refs = [TitlePartsRef(r["title"], sorted(r["parts"], key=int))
for r in refs]
return sorted(refs, key=lambda x: int(x.title))
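# Illustrative trace of the grouping above (title/part values hypothetical):
#     convert_cfr_refs([{"title": 11, "part": 110}, {"title": 11, "part": 104}])
# groups both parts under title 11 and sorts them numerically, yielding roughly
#     [TitlePartsRef(title=11, parts=[104, 110])]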
@click.command()
@click.argument('document_number')
def preprocess_notice(document_number):
"""Preprocess notice XML. Either fetch from the Federal Register or read a
notice from disk. Apply some common transformations to it and output the
resulting file(s). There may be more than one as documents might be split
if they have multiple effective dates."""
meta = federalregister.meta_data(
document_number, [
"agencies",
"docket_ids",
"effective_on",
"cfr_references",
"comments_close_on",
"end_page",
"full_text_xml_url",
"html_url",
"publication_date",
"regulation_id_numbers",
"start_page",
"volume"
])
notice_xmls = list(notice_xmls_for_url(meta['full_text_xml_url']))
for notice_xml in notice_xmls:
notice_xml.published = meta['publication_date']
notice_xml.fr_volume = meta['volume']
notice_xml.start_page = meta['start_page']
notice_xml.end_page = meta['end_page']
if meta.get('html_url'):
notice_xml.fr_html_url = meta['html_url']
if meta.get("comments_close_on"):
notice_xml.comments_close_on = meta["comments_close_on"]
if meta.get('regulation_id_numbers'):
notice_xml.rins = meta['regulation_id_numbers']
if meta.get('docket_ids'):
notice_xml.docket_ids = meta['docket_ids']
notice_xml.set_agencies(meta.get('agencies', []))
cfr_refs = convert_cfr_refs(meta.get('cfr_references', []))
if cfr_refs:
notice_xml.cfr_refs = cfr_refs
file_name = document_number
if len(notice_xmls) > 1:
effective_date = notice_xml.derive_effective_date()
file_name = split_doc_num(document_number,
effective_date.isoformat())
elif meta.get('effective_on'):
notice_xml.effective = meta['effective_on']
notice_xml.version_id = file_name
notice_xml.derive_where_needed()
notice_entry = entry.Notice(file_name)
notice_entry.write(notice_xml)
class NoticeResolver(DependencyResolver):
PATH_PARTS = (entry.Notice.PREFIX, '(?P<doc_number>[a-zA-Z0-9-_]+)')
def resolution(self):
args = [self.match.group('doc_number')]
return preprocess_notice.main(args, standalone_mode=False)
|
cc0-1.0
|
b26f667840c23522e5d15e53e3626f8f
| 35.058824
| 78
| 0.610114
| 3.641584
| false
| false
| false
| false
|
eregs/regulations-parser
|
interpparser/layers.py
|
3
|
2437
|
from collections import defaultdict
from interpparser.tree import text_to_labels
from regparser.citations import Label
from regparser.layer.layer import Layer
from regparser.tree import struct
class Interpretations(Layer):
"""Supplement I (interpretations) provides (sometimes very lengthy) extra
information about particular paragraphs. This layer provides those
interpretations."""
shorthand = 'interpretations'
def __init__(self, *args, **kwargs):
Layer.__init__(self, *args, **kwargs)
self.lookup_table = defaultdict(list)
def pre_process(self):
"""Create a lookup table for each interpretation"""
def per_node(node):
if (node.node_type != struct.Node.INTERP or
node.label[-1] != struct.Node.INTERP_MARK):
return
# Always add a connection based on the interp's label
self.lookup_table[tuple(node.label[:-1])].append(node)
# Also add connections based on the title
for label in text_to_labels(node.title or '',
Label.from_node(node),
warn=False):
label = tuple(label[:-1]) # Remove Interp marker
if node not in self.lookup_table[label]:
self.lookup_table[label].append(node)
struct.walk(self.tree, per_node)
def process(self, node):
"""Is there an interpretation associated with this node? If yes,
return the associated layer information. @TODO: Right now, this only
associates if there is a direct match. It should also associate if any
parents match"""
label = tuple(node.label)
non_empty = [n for n in self.lookup_table[label]
if not self.empty_interpretation(n)]
return [{'reference': n.label_id()} for n in non_empty] or None
@staticmethod
def empty_interpretation(interp):
"""We don't want to include empty (e.g. \n\n) nodes as
interpretations unless their children are subparagraphs. We
distinguish subparagraphs from structural children by checking the
location of the 'Interp' delimiter."""
if interp.text.strip():
return False
return all(not child.label or
child.label[-1] == struct.Node.INTERP_MARK
for child in interp.children)
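# Illustrative sketch (labels hypothetical, not part of the original module):
# after pre_process() fills the lookup table, calling process() on a node
# labeled ['1005', '6', 'a'] returns layer data roughly like
#     [{'reference': '1005-6-a-Interp'}]
# whenever a non-empty interpretation node with that label exists in
# Supplement I; otherwise it returns None.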
|
cc0-1.0
|
4f9a50749ddb1558e5fad81e22003ce3
| 41.017241
| 78
| 0.612228
| 4.47156
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/web/jobs/views.py
|
2
|
13335
|
import abc
import hashlib
import six
from django.http import HttpResponse
from rest_framework import generics, mixins, status
from rest_framework.parsers import FileUploadParser, MultiPartParser
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from rest_framework.response import Response
from regparser.web.jobs.models import (PipelineJob, ProposalPipelineJob,
RegulationFile)
from regparser.web.jobs.serializers import (FileUploadSerializer,
PipelineJobSerializer,
ProposalPipelineJobSerializer)
from regparser.web.jobs.utils import (add_redis_data_to_job_data,
create_status_url, delete_eregs_job,
eregs_site_api_url, file_url,
queue_eregs_job,
queue_notification_email)
renderer_classes = (
JSONRenderer,
BrowsableAPIRenderer
)
class BaseViewList(six.with_metaclass(abc.ABCMeta)):
"""
Intended to be subclassed by classes subclassing ``JobViewList``.
Contains the POST-related methods that are relevant to subclasses of
``JobViewList`` but not to ``JobViewList``.
Should be in the subclass list before ``JobViewList``.
"""
@abc.abstractmethod
def build_eregs_args(self, validated_data):
"""
Each type of parser job has its own set of arguments.
The ``create`` method calls this method to construct the argument
string specific to that type of job.
:arg dict validated_data: Incoming data from the POST that's already
been validated by the serializer.
:rtype: list[str]
:returns: The components of the argument string in list form.
"""
raise NotImplementedError()
def create(self, request, *args, **kwargs):
"""
Overrides the ``create`` method of ``mixins.CreateModelMixin`` in order
to add the new job to the Redis queue.
Side effects
Via ``queue_eregs_job`` and ``PipelineJobSerializer.save``, alters
the redis queue and the DB.
:arg HttpRequest request: the incoming request.
:rtype: Response
:returns: JSON or HTML of the information about the job (status 201),
or about why the job couldn't be added (status 400).
"""
serialized = self.get_serializer(data=request.data)
serialized.is_valid(raise_exception=True)
eregs_args = self.build_eregs_args(serialized.validated_data)
job = queue_eregs_job(eregs_args, timeout=60 * 30, result_ttl=-1)
# Paranoia--validate the values we provide:
job_id = job.id
for validator in serialized.get_fields()["job_id"].validators:
validator(job_id)
statusurl = create_status_url(job_id, sub_path=self.sub_path)
for validator in serialized.get_fields()["url"].validators:
validator(statusurl)
if serialized.validated_data.get("notification_email"):
queue_notification_email(
job, statusurl,
serialized.validated_data["notification_email"])
serialized.save(job_id=job_id, url=statusurl,
destination=eregs_site_api_url)
headers = self.get_success_headers(serialized.data)
# Adding the Refresh header here so that the browser does the
# user-friendly thing of redirecting the user to the page for the
# newly-created object, even though use of the Refresh header is
# frowned upon in some circles.
#
# Not using redirect via 302 or 303 so that non-browser users get the
# 201 status code they expect upon a successful POST.
#
# I'm open to debate on this decision.
headers["Refresh"] = "0;url={0}".format(statusurl)
return Response(serialized.data, status=status.HTTP_201_CREATED,
headers=headers)
class JobViewList(mixins.ListModelMixin,
mixins.CreateModelMixin,
generics.GenericAPIView):
"""
Handles the list view for jobs of all types.
Should be subclassed along with ``BaseViewList`` for classes handling
specific job types.
"""
queryset = PipelineJob.objects.all()
renderer_classes = renderer_classes
serializer_class = PipelineJobSerializer
def filter_queryset(self, request, *args, **kwargs):
"""
Overridden in order to get data from the Redis queue as well as the DB.
Impure
Pulls information from the DB and the Redis queue.
:arg HttpRequest request: the incoming request.
:rtype: list[PipelineJob]
:returns: List of PipelineJob objects.
"""
queryset = super(JobViewList, self).filter_queryset(request, *args,
**kwargs)
queryset = add_redis_data_to_job_data(queryset)
return queryset
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class JobViewInstance(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
generics.GenericAPIView):
queryset = PipelineJob.objects.all()
renderer_classes = renderer_classes
lookup_field = "job_id"
serializer_class = PipelineJobSerializer
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
"""
Overridden in order to remove the job from the Redis queue as well as
the DB.
Side Effects
Via ``delete_eregs_job``, alters the Redis queue and the DB.
:arg HttpRequest request: the incoming request.
:rtype: Response
:returns: JSON or HTML of the information about the job.
"""
instance = self.get_object()
job_id = instance.job_id
self.perform_destroy(instance)
delete_eregs_job(job_id)
return Response(status=status.HTTP_204_NO_CONTENT)
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
"""
Overridden in order to get data from the Redis queue as well as the DB.
Impure
Pulls information from the DB and the Redis queue.
:arg HttpRequest request: the incoming request.
:rtype: Response
:returns: JSON or HTML of the information about the job.
"""
instance = self.get_object()
instance = add_redis_data_to_job_data([instance])[0]
serializer = self.get_serializer(instance)
return Response(serializer.data)
class PipelineJobViewList(BaseViewList, JobViewList):
queryset = PipelineJob.objects.all()
serializer_class = PipelineJobSerializer
sub_path = "regulations/"
def build_eregs_args(self, validated_data):
"""
Overrides the method from ``BaseViewList`` in order to pass the
arguments appropriate for the ``pipeline`` command.
It returns a list of string components that can be passed to the
`eregs` task runner. For example::
["pipeline", "0", "0", "http://some.url/"]
:arg dict validated_data: Incoming data from the POST that's already
been validated by the serializer.
:rtype: list[str]
:returns: The components of the argument string in list form.
"""
return [
"pipeline",
str(validated_data["cfr_title"]),
str(validated_data["cfr_part"]),
eregs_site_api_url,
]
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class PipelineJobViewInstance(JobViewInstance):
queryset = PipelineJob.objects.all()
serializer_class = PipelineJobSerializer
sub_path = "regulations/"
class ProposalPipelineJobViewList(BaseViewList, JobViewList):
queryset = ProposalPipelineJob.objects.all()
serializer_class = ProposalPipelineJobSerializer
sub_path = "notices/"
def build_eregs_args(self, validated_data):
"""
Overrides the method from ``BaseViewList`` in order to pass the
arguments appropriate for the ``proposal_pipeline`` command.
It returns a list of string components that can be passed to the
`eregs` task runner. For example::
["proposal_pipeline", "/tmp/tmp.xml", "http://some.url/"]
Impure
Reads the contents of the proposal file from the filesystem (in
future, likely some other file storage, but impure either way).
:arg dict validated_data: Incoming data from the POST that's already
been validated by the serializer.
:rtype: list[str]
:returns: The components of the argument string in list form.
"""
reg_file = RegulationFile.objects.get(
hexhash=validated_data["file_hexhash"])
# TODO: This is a total hack; we should not be storing the contents in
# the DB but reading the file from the filesystem. Only doing this
# temporarily before changing the proposal_pipeline command to work
# differently.
path = reg_file.file.storage.path(reg_file.file.name)
return [
"proposal_pipeline",
path,
eregs_site_api_url
]
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class ProposalPipelineJobViewInstance(JobViewInstance):
queryset = ProposalPipelineJob.objects.all()
serializer_class = ProposalPipelineJobSerializer
sub_path = "notices/"
class FileUploadView(mixins.ListModelMixin, mixins.CreateModelMixin,
generics.GenericAPIView):
parser_classes = (FileUploadParser, MultiPartParser)
parser_classes = (MultiPartParser,)
serializer_class = FileUploadSerializer
queryset = RegulationFile.objects.all()
lookup_field = "hexhash"
size_limit = 100000000 # Arbitrary 100MB limit.
def create(self, request, *args, **kwargs):
"""
Overrides the ``create`` method of ``mixins.CreateModelMixin`` in order
to add the file contents to the database.
Side effects
Alters the DB.
:arg HttpRequest request: the incoming request.
:rtype: Response
:returns: JSON or HTML of the information about the file (status 201),
or about why the file couldn't be added (status 400).
"""
serialized = self.get_serializer(data=request.data)
serialized.is_valid(raise_exception=True)
uploaded_file = request.data["file"]
if uploaded_file.size > self.size_limit:
return Response(
dict(error="File too large ({0}-byte limit).".format(
self.size_limit)),
status=status.HTTP_400_BAD_REQUEST
)
if uploaded_file.multiple_chunks():
contents = b"".join(chunk for chunk in uploaded_file.chunks())
else:
contents = uploaded_file.read()
sha = hashlib.sha256(contents)
hexhash = sha.hexdigest()
filename = uploaded_file.name
url = file_url(hexhash)
if not RegulationFile.objects.filter(hexhash=hexhash).exists():
serialized.save(contents=contents, file=uploaded_file,
filename=filename, hexhash=hexhash, url=url)
headers = self.get_success_headers(serialized.data)
return Response(serialized.data, status=status.HTTP_201_CREATED,
headers=headers)
else:
return Response(dict(error="File already present."),
status=status.HTTP_400_BAD_REQUEST)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class FileUploadViewInstance(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin, mixins.DestroyModelMixin,
generics.GenericAPIView):
serializer_class = FileUploadSerializer
queryset = RegulationFile.objects.all()
lookup_field = "hexhash"
def get(self, request, *args, **kwargs):
"""
Overrides the method from ``RetrieveModelMixin`` so that we return the
contents of the file instead of a JSON object representing the file.
Impure
Reads from the DB.
:arg HttpRequest request: the incoming request.
:rtype: Response
:returns: The raw contents of the file.
"""
# Is the next line the best way to kick off a 404 if there's no match?
self.retrieve(request, *args, **kwargs)
uploaded_file = RegulationFile.objects.get(hexhash=kwargs["hexhash"])
return HttpResponse(uploaded_file.contents)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
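# Hypothetical client-side sketch (endpoint paths assumed, not taken from this
# module): uploads are keyed by their SHA-256 hex digest, so a client can
# compute the same key locally and later fetch the stored file back by hash.
#     import hashlib, requests
#     contents = open('proposal.xml', 'rb').read()
#     hexhash = hashlib.sha256(contents).hexdigest()
#     requests.post('http://localhost:8000/rq/upload/',
#                   files={'file': ('proposal.xml', contents)})
#     requests.get('http://localhost:8000/rq/upload/{0}/'.format(hexhash))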
|
cc0-1.0
|
93b4002184c3cfd165d6f809949d81a0
| 36.248603
| 79
| 0.625572
| 4.414101
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/tree/paragraph.py
|
3
|
6581
|
import hashlib
import re
from regparser.search import segments
from regparser.tree import struct
from regparser.tree.depth import markers as mtypes
p_levels = [list(mtypes.lower), list(mtypes.ints), list(mtypes.roman),
list(mtypes.upper), list(mtypes.em_ints), list(mtypes.em_roman)]
def p_level_of(marker):
"""Given a marker(string), determine the possible paragraph levels it
could fall into. This is useful for determining the order of
paragraphs"""
potential_levels = []
for level, markers in enumerate(p_levels):
if marker in markers:
potential_levels.append(level)
return potential_levels
_NONWORDS = re.compile(r'\W+')
def hash_for_paragraph(text):
"""Hash a chunk of text and convert it into an integer for use with a
MARKERLESS paragraph identifier. We'll trim to just 8 hex characters for
legibility. We don't need to fear hash collisions as we'll have 16**8 ~ 4
billion possibilities. The birthday paradox tells us we'd only expect
collisions after ~ 60 thousand entries. We're expecting at most a few
hundred"""
phrase = _NONWORDS.sub('', text.lower())
hashed = hashlib.sha1(phrase.encode('utf-8')).hexdigest()[:8]
return int(hashed, 16)
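# Illustrative properties (not part of the original module): case and
# punctuation are stripped before hashing, and the identifier fits in 8 hex
# digits, i.e. the range [0, 16**8).
#     assert hash_for_paragraph('See Part 110.') == hash_for_paragraph('see part 110')
#     assert 0 <= hash_for_paragraph('anything at all') < 16 ** 8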
class ParagraphParser():
def __init__(self, p_regex, node_type):
"""p_regex is the regular expression used when searching through
paragraphs. It should contain a %s for the next paragraph 'part'
(e.g. 'a', 'A', '1', 'i', etc.) inner_label_fn is a function which
takes the current label, and the next paragraph 'part' and produces
a new label."""
self.p_regex = p_regex
self.node_type = node_type
@staticmethod
def matching_subparagraph_ids(p_level, paragraph):
"""Return a list of matches if this paragraph id matches one of the
subparagraph ids (e.g. letter (i) and roman numeral (i)."""
matches = []
for depth in range(p_level + 1, len(p_levels)):
for sub_id, sub in enumerate(p_levels[depth]):
if sub == p_levels[p_level][paragraph]:
matches.append((depth, sub_id))
return matches
def best_start(self, text, p_level, paragraph, starts, exclude=None):
"""Given a list of potential paragraph starts, pick the best based
on knowledge of subparagraph structure. Do this by checking if the
id following the subparagraph (e.g. ii) is between the first match
and the second. If so, skip it, as that implies the first match was
a subparagraph."""
if exclude is None:
exclude = []
subparagraph_hazards = self.matching_subparagraph_ids(
p_level, paragraph)
starts = starts + [(len(text), len(text))]
for i in range(1, len(starts)):
_, prev_end = starts[i - 1]
next_start, _ = starts[i]
s_text = text[prev_end:next_start]
s_exclude = [
(e_start + prev_end, e_end + prev_end)
for e_start, e_end in exclude]
is_subparagraph = False
for hazard_level, hazard_idx in subparagraph_hazards:
if self.find_paragraph_start_match(
s_text, hazard_level, hazard_idx + 1, s_exclude):
is_subparagraph = True
if not is_subparagraph:
return starts[i - 1]
def find_paragraph_start_match(self, text, p_level, paragraph,
exclude=None):
"""Find the positions for the start and end of the requested label.
        p_level is an index into p_levels; paragraph is the index within that level.
Return None if not present. Does not return results in the exclude
list (a list of start/stop indices). """
if exclude is None:
exclude = []
if len(p_levels) <= p_level or len(p_levels[p_level]) <= paragraph:
return None
match_starts = [(m.start(), m.end()) for m in re.finditer(
self.p_regex.format(p_levels[p_level][paragraph]), text)]
match_starts = [
(start, end) for start, end in match_starts
if all(end < es or start > ee for es, ee in exclude)]
if len(match_starts) == 0:
return None
elif len(match_starts) == 1:
return match_starts[0]
else:
return self.best_start(
text, p_level, paragraph, match_starts, exclude)
def paragraph_offsets(self, text, p_level, paragraph, exclude=None):
"""Find the start/end of the requested paragraph. Assumes the text
does not just up a p_level -- see build_paragraph_tree below."""
if exclude is None:
exclude = []
start = self.find_paragraph_start_match(
text, p_level, paragraph, exclude)
if start is None:
return None
id_start, id_end = start
end = self.find_paragraph_start_match(
text[id_end:], p_level, paragraph + 1,
[(e_start - id_end, e_end - id_end) for e_start, e_end in exclude]
)
if end is None:
end = len(text)
else:
end = end[0] + id_end
return (id_start, end)
def paragraphs(self, text, p_level, exclude=None):
"""Return a list of paragraph offsets defined by the level param."""
if exclude is None:
exclude = []
def offsets_fn(remaining_text, p_idx, exclude):
return self.paragraph_offsets(
remaining_text, p_level, p_idx, exclude)
return segments(text, offsets_fn, exclude)
def build_tree(self, text, p_level=0, exclude=None, label=None, title=''):
"""Build a dict to represent the text hierarchy."""
if exclude is None:
exclude = []
if label is None:
label = []
subparagraphs = self.paragraphs(text, p_level, exclude)
if subparagraphs:
body_text = text[0:subparagraphs[0][0]]
else:
body_text = text
children = []
for paragraph, (start, end) in enumerate(subparagraphs):
new_text = text[start:end]
new_excludes = [(e[0] - start, e[1] - start) for e in exclude]
new_label = label + [p_levels[p_level][paragraph]]
children.append(
self.build_tree(
new_text, p_level + 1, new_excludes, new_label))
return struct.Node(body_text, children, label, title, self.node_type)
|
cc0-1.0
|
0063e715e1ec1862b3a8fac3cdcd609d
| 40.13125
| 78
| 0.590792
| 3.933652
| false
| false
| false
| false
|
eregs/regulations-parser
|
tests/layer_internal_citations_tests.py
|
3
|
15457
|
# vim: set encoding=utf-8
from unittest import TestCase
from regparser.layer import internal_citations
from regparser.tree.struct import Node
class ParseTest(TestCase):
def setUp(self):
self.parser = internal_citations.InternalCitationParser(
None, cfr_title=None)
self.parser.verify_citations = False
def test_process_method(self):
node = Node("The requirements in paragraph (a)(4)(iii) of",
label=['1005', '6'])
citations = self.parser.process(node)
self.assertEqual(len(citations), 1)
def test_underparagraph(self):
text = 'Something something underparagraphs (a)(4) through (5)'
citations = self.parser.process(Node(text, label=['1005', '6']))
self.assertEqual(len(citations), 2)
def test_except_for(self):
text = 'paragraph (b)(2)(i) through (b)(2)(v) except for '
text += '(b)(2)(i)(D) and (b)(2)(vii) through (b)(2)(xiv)'
citations = self.parser.process(Node(text, label=['1005', '6']))
self.assertEqual(len(citations), 5)
self.assertEqual(citations[0]['citation'],
['1005', '6', 'b', '2', 'i'])
self.assertEqual(citations[1]['citation'],
['1005', '6', 'b', '2', 'v'])
self.assertEqual(citations[2]['citation'],
['1005', '6', 'b', '2', 'i', 'D'])
self.assertEqual(citations[3]['citation'],
['1005', '6', 'b', '2', 'vii'])
self.assertEqual(citations[4]['citation'],
['1005', '6', 'b', '2', 'xiv'])
text = 'paragraph (b)(2)(i) through (b)(2)(v) (except for '
text += '(b)(2)(i)(D)) and (b)(2)(vii) through (b)(2)(xiv)'
citations = self.parser.process(Node(text, label=['1005', '6']))
self.assertEqual(len(citations), 5)
self.assertEqual(citations[0]['citation'],
['1005', '6', 'b', '2', 'i'])
self.assertEqual(citations[1]['citation'],
['1005', '6', 'b', '2', 'v'])
self.assertEqual(citations[2]['citation'],
['1005', '6', 'b', '2', 'i', 'D'])
self.assertEqual(citations[3]['citation'],
['1005', '6', 'b', '2', 'vii'])
self.assertEqual(citations[4]['citation'],
['1005', '6', 'b', '2', 'xiv'])
def test_multiple_paragraphs(self):
""" Ensure that offsets work correctly in a simple multiple paragraph
scenario. """
text = u"the requirements of paragraphs (c)(3), (d)(2), (e)(1), "
text += "(e)(3), and (f) of this section"
citations = self.parser.process(Node(text, label=['1005', '6']))
self.assertEqual(len(citations), 5)
for c in citations:
if c['citation'] == ['1005', '6', u'c', u'3']:
self.assertEqual(text[c['offsets'][0][0]], '(')
self.assertEquals(c['offsets'], [(31, 37)])
self.assertEquals(text[c['offsets'][0][0] + 1], 'c')
if c['citation'] == ['1005', '6', u'd', u'2']:
self.assertEquals(text[c['offsets'][0][0] + 1], 'd')
def test_multiple_paragraph_or(self):
""" Ensure that an 'or' between internal citations is matched
correctly. """
text = u"set forth in paragraphs (b)(1) or (b)(2)"
citations = self.parser.process(Node(text, label=['1005', '6']))
self.assertEquals(2, len(citations))
def test_single_paragraph(self):
""" Ensure that offsets work correctly in a simple single paragraph
citation. """
text = 'The requirements in paragraph (a)(4)(iii) of'
citations = self.parser.process(Node(text, label=['1005', '6']))
c = citations[0]
self.assertEquals(text[c['offsets'][0][0]:c['offsets'][0][1]],
u'(a)(4)(iii)')
self.assertEquals(['1005', '6', 'a', '4', 'iii'], c['citation'])
def test_single_labeled_paragraph(self):
""" Ensure the parser doesn't pick up unecessary elements, such as the
(a) in the text below. """
text = '(a) Solicited issuance. Except as provided in paragraph (b) '
text += 'of this section'
citations = self.parser.process(Node(text, label=['1005', '6']))
self.assertEqual(1, len(citations))
def test_multiple_section_citation(self):
""" Ensure that offsets work correctly in a simple multiple section
citation case. """
text = u"set forth in §§ 1005.6(b)(3) and 1005.11 (b)(1)(i) from 60 "
text += "days"
citations = self.parser.process(Node(text, label=['1005', '6']))
self.assertEqual(len(citations), 2)
occurrences = 0
for c in citations:
if c['citation'] == [u'1005', u'6', u'b', u'3']:
occurrences += 1
self.assertEquals(text[c['offsets'][0][0]:c['offsets'][0][1]],
u'1005.6(b)(3)')
if c['citation'] == [u'1005', u'11', u'b', u'1', u'i']:
occurrences += 1
self.assertEquals(text[c['offsets'][0][0]:c['offsets'][0][1]],
u'1005.11 (b)(1)(i)')
self.assertEquals(occurrences, 2)
def test_single_section_citation(self):
""" Ensure that offsets work correctly in a simple single section
citation case. """
text = u"date in § 1005.20(h)(1) must disclose"
citations = self.parser.process(Node(text, label=['1005', '6']))
c = citations[0]
self.assertEquals(text[c['offsets'][0][0]:c['offsets'][0][1]],
u'1005.20(h)(1)')
def test_multiple_paragraph_single_section(self):
text = u'§ 1005.10(a) and (d)'
result = self.parser.process(Node(text, label=['1005', '6']))
self.assertEqual(2, len(result))
self.assertEqual(['1005', '10', 'a'], result[0]['citation'])
self.assertEqual(['1005', '10', 'd'], result[1]['citation'])
start, end = result[0]['offsets'][0]
self.assertEqual(u'1005.10(a)', text[start:end])
start, end = result[1]['offsets'][0]
self.assertEqual(u'(d)', text[start:end])
def test_multiple_paragraph_single_section2(self):
text = u'§ 1005.7(b)(1), (2) and (3)'
result = self.parser.process(Node(text, label=['1005', '6']))
self.assertEqual(3, len(result))
self.assertEqual(['1005', '7', 'b', '1'], result[0]['citation'])
self.assertEqual(['1005', '7', 'b', '2'], result[1]['citation'])
self.assertEqual(['1005', '7', 'b', '3'], result[2]['citation'])
start, end = result[0]['offsets'][0]
self.assertEqual(u'1005.7(b)(1)', text[start:end])
start, end = result[1]['offsets'][0]
self.assertEqual(u'(2)', text[start:end])
start, end = result[2]['offsets'][0]
self.assertEqual(u'(3)', text[start:end])
def test_multiple_paragraphs_this_section(self):
text = u'paragraphs (c)(1) and (2) of this section'
result = self.parser.process(Node(text, label=['1005', '6']))
self.assertEqual(2, len(result))
self.assertEqual(['1005', '6', 'c', '1'], result[0]['citation'])
self.assertEqual(['1005', '6', 'c', '2'], result[1]['citation'])
start, end = result[0]['offsets'][0]
self.assertEqual(u'(c)(1)', text[start:end])
start, end = result[1]['offsets'][0]
self.assertEqual(u'(2)', text[start:end])
def test_multiple_paragraphs_max_depth(self):
text = u'see paragraphs (z)(9)(vi)(A) and (D)'
results = self.parser.process(Node(text, label=['999', '88']))
self.assertEqual(2, len(results))
resultA, resultD = results
self.assertEqual(['999', '88', 'z', '9', 'vi', 'A'],
resultA['citation'])
offsets = resultA['offsets'][0]
self.assertEqual('(z)(9)(vi)(A)', text[offsets[0]:offsets[1]])
self.assertEqual(['999', '88', 'z', '9', 'vi', 'D'],
resultD['citation'])
offsets = resultD['offsets'][0]
self.assertEqual('(D)', text[offsets[0]:offsets[1]])
def _assert_paragraphs(self, text, label, text_to_labels):
"""Given text to search, a node label, and a mapping between text in
the original and citation labels, verify that the citations can be
found in the text"""
results = self.parser.process(Node(text, label=label))
self.assertEqual(len(text_to_labels), len(results))
for result in results:
start, end = result['offsets'][0]
key = text[start:end]
self.assertEqual(text_to_labels[key], result['citation'])
def test_multiple_paragraphs_alpha_then_roman(self):
self._assert_paragraphs(
'paragraphs (b)(1)(ii) and (iii)', ['1005', '6'],
{'(b)(1)(ii)': ['1005', '6', 'b', '1', 'ii'],
'(iii)': ['1005', '6', 'b', '1', 'iii']})
self._assert_paragraphs(
u'§ 1005.15(d)(1)(i) and (ii)', ['1005', '15'],
{'1005.15(d)(1)(i)': ['1005', '15', 'd', '1', 'i'],
'(ii)': ['1005', '15', 'd', '1', 'ii']})
self._assert_paragraphs(
u'§ 1005.9(a)(5) (i), (ii), or (iii)', ['1005', '9'],
{'1005.9(a)(5) (i)': ['1005', '9', 'a', '5', 'i'],
'(ii)': ['1005', '9', 'a', '5', 'ii'],
'(iii)': ['1005', '9', 'a', '5', 'iii']})
self._assert_paragraphs(
u'§ 1005.11(a)(1)(vi) or (vii).', ['1005', '11'],
{'1005.11(a)(1)(vi)': ['1005', '11', 'a', '1', 'vi'],
'(vii)': ['1005', '11', 'a', '1', 'vii']})
def test_appendix_citation(self):
self._assert_paragraphs(
"Please see A-5 and Q-2(r) and Z-12(g)(2)(ii) then more text",
['1005', '10'],
{'A-5': ['1005', 'A', '5'],
'Q-2(r)': ['1005', 'Q', '2(r)'],
'Z-12(g)(2)(ii)': ['1005', 'Z', '12(g)(2)(ii)']})
def test_section_verbose(self):
self._assert_paragraphs(
"And Section 222.87(d)(2)(i) says something", ['222', '87'],
{'222.87(d)(2)(i)': ['222', '87', 'd', '2', 'i']})
self._assert_paragraphs(
"Listing sections 11.55(d) and 321.11 (h)(4)", ['222', '87'],
{'11.55(d)': ['11', '55', 'd'],
'321.11 (h)(4)': ['321', '11', 'h', '4']})
def test_comment_header(self):
self._assert_paragraphs(
"See comment 32(b)(3) blah blah", ['222', '87'],
{'32(b)(3)': ['222', '32', 'b', '3', Node.INTERP_MARK]})
def test_sub_comment(self):
self._assert_paragraphs(
"refer to comment 36(a)(2)-3 of thing", ['222', '87'],
{'36(a)(2)-3': ['222', '36', 'a', '2', Node.INTERP_MARK, '3']})
self._assert_paragraphs(
"See comment 3(b)(1)-1.v.", ['222', '87'],
# Note the final period is not included
{'3(b)(1)-1.v': ['222', '3', 'b', '1', Node.INTERP_MARK, '1',
'v']})
def test_multiple_comments(self):
text = "See, e.g., comments 31(b)(1)(iv)-1 and 31(b)(1)(vi)-1"
result = self.parser.process(Node(text, label=['222', '87']))
self.assertEqual(2, len(result))
self.assertEqual(['222', '31', 'b', '1', 'iv', Node.INTERP_MARK, '1'],
result[0]['citation'])
offsets = result[0]['offsets'][0]
self.assertEqual('31(b)(1)(iv)-1', text[offsets[0]:offsets[1]])
self.assertEqual(['222', '31', 'b', '1', 'vi', Node.INTERP_MARK, '1'],
result[1]['citation'])
offsets = result[1]['offsets'][0]
self.assertEqual('31(b)(1)(vi)-1', text[offsets[0]:offsets[1]])
def test_paren_in_interps(self):
text = "covers everything except paragraph (d)(3)(i) of this section"
result = self.parser.process(
Node(text, label=['222', '87', Node.INTERP_MARK]))
self.assertEqual(1, len(result))
self.assertEqual(['222', '87', 'd', '3', 'i'], result[0]['citation'])
offsets = result[0]['offsets'][0]
self.assertEqual('(d)(3)(i)', text[offsets[0]:offsets[1]])
result = self.parser.process(
Node(text, label=['222', '87', 'd', '3', Node.INTERP_MARK]))
self.assertEqual(1, len(result))
self.assertEqual(['222', '87', 'd', '3', 'i'], result[0]['citation'])
offsets = result[0]['offsets'][0]
self.assertEqual('(d)(3)(i)', text[offsets[0]:offsets[1]])
def test_cfr_format(self):
"""We aren't processing this form yet"""
text = "12 CFR 1026.3(d)"
result = self.parser.process(Node(text, label=['1111']))
self.assertEqual(None, result)
def test_verify_citations(self):
tree = Node(label=['1111', '2', '3'],
children=[Node(label=['222', '1', '1']),
Node(label=['222', '1', '1'],
children=[Node(label=['111', '34'])])])
parser = internal_citations.InternalCitationParser(
tree, cfr_title=None)
parser.pre_process()
self.assertEqual(parser.known_citations, {
('1111', '2', '3'), ('222', '1', '1'), ('111', '34')})
parser.verify_citations = False
text = 'Section 111.34 and paragraph (c)'
result = parser.process(Node(text))
self.assertEqual(2, len(result))
parser.verify_citations = True
result = parser.process(Node(text))
self.assertEqual(1, len(result))
start, end = result[0]['offsets'][0]
self.assertEqual('111.34', text[start:end].strip())
def test_internal_cfr_format(self):
text = 'under 11 CFR 110.14 are not subject'
self.parser.cfr_title = '11'
result = self.parser.process(Node(text, label=['110', '1']))
self.assertEqual(1, len(result))
self.assertEqual(['110', '14'], result[0]['citation'])
offsets = result[0]['offsets'][0]
self.assertEqual('11 CFR 110.14', text[offsets[0]:offsets[1]])
# Verify that CFR citations from other titles do not get caught.
self.parser.cfr_title = '12'
result = self.parser.process(Node(text, label=['110', '1']))
self.assertEqual(None, result)
# Verify that CFR citations from other parts do not get caught.
self.parser.cfr_title = '11'
result = self.parser.process(Node(text, label=['111', '1']))
self.assertEqual(None, result)
def test_multiple_internal_cfr(self):
text = 'prohibited from making contributions under 11 CFR 110.19, '
text += '110.20, and 110.21'
self.parser.cfr_title = '11'
result = self.parser.process(Node(text, label=['110', '1']))
self.assertEqual(3, len(result))
self.assertEqual(['110', '19'], result[0]['citation'])
offsets = result[0]['offsets'][0]
self.assertEqual('11 CFR 110.19', text[offsets[0]:offsets[1]])
self.assertEqual(['110', '20'], result[1]['citation'])
offsets = result[1]['offsets'][0]
self.assertEqual('110.20', text[offsets[0]:offsets[1]])
self.assertEqual(['110', '21'], result[2]['citation'])
offsets = result[2]['offsets'][0]
self.assertEqual('110.21', text[offsets[0]:offsets[1]])
|
cc0-1.0
|
a9b037befed4a0cbb25fd3a6e111a831
| 46.10061
| 78
| 0.521846
| 3.362133
| false
| true
| false
| false
|