| column | type / range |
|---|---|
| repo_name | string (7–65 chars) |
| path | string (5–186 chars) |
| copies | string (1–4 chars) |
| size | string (4–6 chars) |
| content | string (941–973k chars) |
| license | string (14 classes) |
| hash | string (32 chars) |
| line_mean | float64 (5–100) |
| line_max | int64 (26–999) |
| alpha_frac | float64 (0.25–0.93) |
| ratio | float64 (1.5–7.35) |
| autogenerated | bool (1 class) |
| config_or_test | bool (2 classes) |
| has_no_keywords | bool (2 classes) |
| has_few_assignments | bool (1 class) |
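The rows below are individual records of this dataset (a source file plus its per-file metadata). As a minimal sketch of how a dataset with this schema might be loaded and filtered with the Hugging Face `datasets` library — the dataset path `user/python-code-dataset` is a hypothetical placeholder, not a real identifier:

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# The path "user/python-code-dataset" is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/python-code-dataset", split="train")

# Keep human-written, non-config/test files under a permissive license.
keep_licenses = {"mpl-2.0", "unlicense", "cc0-1.0"}
filtered = ds.filter(
    lambda row: row["license"] in keep_licenses
    and not row["autogenerated"]
    and not row["config_or_test"]
)

row = filtered[0]
print(row["repo_name"], row["path"], len(row["content"]))
```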
repo_name: mozilla/normandy | path: normandy/recipes/tests/api/v3/test_shield_identicon.py | copies: 1 | size: 1396 | content:
import pytest
from normandy.recipes.api.v3.shield_identicon import Genome
@pytest.fixture
def genome():
seed = 123
return Genome(seed)
class TestGenome(object):
"""
Tests the Genome module by setting the seed to a known value and making sure that the
random choices remain consistent, i.e. they do not change over time.
"""
def test_weighted_choice(self, genome):
weighted_options = [
{"weight": 1, "value": "apple"},
{"weight": 2, "value": "orange"},
{"weight": 4, "value": "strawberry"},
]
weighted_choice_values = [
genome.weighted_choice(weighted_options),
genome.weighted_choice(weighted_options),
genome.weighted_choice(weighted_options),
]
assert weighted_choice_values == [
{"weight": 1, "value": "apple"},
{"weight": 2, "value": "orange"},
{"weight": 1, "value": "apple"},
]
def test_emoji(self, genome):
emoji_values = [genome.emoji(), genome.emoji(), genome.emoji()]
assert emoji_values == ["😅", "🐯", "😈"]
def test_color(self, genome):
color_values = [
genome.color().rgb_color,
genome.color().rgb_color,
genome.color().rgb_color,
]
assert color_values == [(7, 54, 66), (255, 207, 0), (88, 110, 117)]
license: mpl-2.0 | hash: 67377af2923b76d7e308e3982934b69f | line_mean: 29.822222 | line_max: 89 | alpha_frac: 0.553713 | ratio: 3.769022 | autogenerated: false | config_or_test: true | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: normandy/recipes/tests/test_checks.py | copies: 1 | size: 4881 | content:
from datetime import timedelta
from django.core.exceptions import ImproperlyConfigured
from django.db.utils import ProgrammingError
import pytest
import requests.exceptions
from normandy.recipes import checks, signing
from normandy.recipes.tests import ActionFactory, RecipeFactory, SignatureFactory, UserFactory
@pytest.mark.django_db
class TestSignaturesUseGoodCertificates(object):
def test_it_works(self):
assert checks.signatures_use_good_certificates(None) == []
def test_it_fails_if_a_signature_does_not_verify(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = None
recipe = RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
mock_verify_x5u.side_effect = signing.BadCertificate("testing exception")
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, None)
assert len(errors) == 1
assert errors[0].id == checks.ERROR_BAD_SIGNING_CERTIFICATE
assert recipe.approved_revision.name in errors[0].msg
def test_it_ignores_signatures_without_x5u(self):
recipe = RecipeFactory(approver=UserFactory(), signed=True)
recipe.signature.x5u = None
recipe.signature.save()
actions = ActionFactory(signed=True)
actions.signature.x5u = None
actions.signature.save()
assert checks.signatures_use_good_certificates(None) == []
def test_it_ignores_signatures_not_in_use(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = None
recipe = RecipeFactory(approver=UserFactory(), signed=True)
SignatureFactory(x5u="https://example.com/bad_x5u") # unused signature
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
def side_effect(x5u, *args):
if "bad" in x5u:
raise signing.BadCertificate("testing exception")
return True
mock_verify_x5u.side_effect = side_effect
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, None)
assert errors == []
def test_it_passes_expire_early_setting(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = 7
recipe = RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, timedelta(7))
assert errors == []
def test_it_reports_x5u_network_errors(self, mocker):
RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
mock_verify_x5u.side_effect = requests.exceptions.ConnectionError
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once()
assert len(errors) == 1
assert errors[0].id == checks.ERROR_COULD_NOT_VERIFY_CERTIFICATE
@pytest.mark.django_db
class TestRecipeSignatureAreCorrect:
def test_it_warns_if_a_field_isnt_available(self, mocker):
"""This is to allow for un-applied to migrations to not break running migrations."""
RecipeFactory(approver=UserFactory(), signed=True)
mock_canonical_json = mocker.patch("normandy.recipes.models.Recipe.canonical_json")
mock_canonical_json.side_effect = ProgrammingError("error for testing")
errors = checks.recipe_signatures_are_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.WARNING_COULD_NOT_CHECK_SIGNATURES
@pytest.mark.django_db
class TestActionSignatureAreCorrect:
def test_it_warns_if_a_field_isnt_available(self, mocker):
"""This is to allow for un-applied to migrations to not break running migrations."""
ActionFactory(signed=True)
mock_canonical_json = mocker.patch("normandy.recipes.models.Action.canonical_json")
mock_canonical_json.side_effect = ProgrammingError("error for testing")
errors = checks.action_signatures_are_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.WARNING_COULD_NOT_CHECK_SIGNATURES
class TestRemoteSettingsConfigIsCorrect:
def test_it_warns_if_remote_settings_config_is_incorrect(self, mocker):
mock_check_config = mocker.patch("normandy.recipes.exports.RemoteSettings.check_config")
mock_check_config.side_effect = ImproperlyConfigured("error for testing")
errors = checks.remotesettings_config_is_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.ERROR_REMOTE_SETTINGS_INCORRECT_CONFIG
license: mpl-2.0 | hash: cf8ba8829b7a8bf7b06807e3c4f10a75 | line_mean: 45.485714 | line_max: 96 | alpha_frac: 0.708052 | ratio: 3.700531 | autogenerated: false | config_or_test: true | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: normandy/recipes/api/filters.py | copies: 1 | size: 4813 | content:
import django_filters
from rest_framework import serializers
from normandy.recipes.models import Recipe
class EnabledStateFilter(django_filters.Filter):
"""A special case filter for filtering recipes by their enabled state"""
def filter(self, qs, value):
if value is not None:
lc_value = value.lower()
if lc_value in ["true", "1"]:
return qs.only_enabled()
elif lc_value in ["false", "0"]:
return qs.only_disabled()
return qs
class ApprovalStateFilter(django_filters.Filter):
"""A special case filter for filtering approval requests by their approval state"""
def filter(self, qs, value):
if value is None:
return qs
lc_value = value.lower()
if lc_value in ["true", "1", "approved"]:
return qs.filter(approved=True)
elif lc_value in ["false", "0", "rejected"]:
return qs.filter(approved=False)
elif lc_value in ["null", "pending"]:
return qs.filter(approved=None)
class BaselineCapabilitiesFilter(django_filters.Filter):
"""Filters recipe by whether they use only baseline capabilities, defaulting to only baseline."""
def __init__(self, *args, default_only_baseline=False, **kwargs):
super().__init__(*args, **kwargs)
self.default_only_baseline = default_only_baseline
def filter(self, qs, value):
baseline_only = self.default_only_baseline
if value is not None:
lc_value = value.lower()
baseline_only = lc_value in ["true", "1"]
if baseline_only:
recipes = list(qs)
if not all(isinstance(recipe, Recipe) for recipe in recipes):
raise TypeError("BaselineCapabilitiesFilter can only be used to filter recipes")
match_ids = []
for recipe in recipes:
if (
recipe.approved_revision
and recipe.approved_revision.uses_only_baseline_capabilities()
):
match_ids.append(recipe.id)
return Recipe.objects.filter(id__in=match_ids)
return qs
class CharSplitFilter(django_filters.CharFilter):
"""Custom CharFilter class that splits the value (if it's set) by `,` into a list
and uses the `__in` operator."""
def filter(self, qs, value):
if value:
qs = qs.filter(**{"{}__in".format(self.field_name): value.split(",")})
return qs
class FilterObjectFieldFilter(django_filters.Filter):
"""
Find recipes that have a filter object with the given field
Format for the filter's value is `key1:value1,key2:value2`. This would
include recipes that have a filter object that has a field `key1` that
contains the value `value1`, and that have a filter object with a field
`key2` that contains `value2`. The two filter objects do not have to be
the same, but may be.
"""
def filter(self, qs, value):
if value is None:
return qs
needles = []
for segment in value.split(","):
if ":" not in segment:
raise serializers.ValidationError(
{"filter_object": "Filters must be of the format `key1:val1,key2:val2,..."}
)
key, val = segment.split(":", 1)
needles.append((key, val))
# Let the database do a first pass filter
for k, v in needles:
qs = qs.filter(latest_revision__filter_object_json__contains=k)
qs = qs.filter(latest_revision__filter_object_json__contains=v)
recipes = list(qs)
if not all(isinstance(recipe, Recipe) for recipe in recipes):
raise TypeError("FilterObjectFieldFilter can only be used to filter recipes")
# For every recipe that contains the right substrings, look through
# their filter objects for an actual match
match_ids = []
for recipe in recipes:
recipe_matches = True
# Recipes need to have all the keys and values in the needles
for k, v in needles:
for filter_object in recipe.latest_revision.filter_object:
# Don't consider invalid filter objects
if not filter_object.is_valid():
continue
if k in filter_object.data and v in str(filter_object.data[k]):
# Found a match
break
else:
# Did not break, so no match was found
recipe_matches = False
break
if recipe_matches:
match_ids.append(recipe.id)
return Recipe.objects.filter(id__in=match_ids)
license: mpl-2.0 | hash: af41c1ff1cd7430c0dab073b7a6dec19 | line_mean: 35.462121 | line_max: 101 | alpha_frac: 0.584043 | ratio: 4.363554 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: normandy/recipes/migrations/0008_auto_20180510_2252.py | copies: 1 | size: 1967 | content:
# Generated by Django 2.0.5 on 2018-05-10 22:52
# flake8: noqa
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("recipes", "0007_convert_simple_filters_to_filter_objects"),
]
operations = [
migrations.CreateModel(
name="EnabledState",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("created", models.DateTimeField(default=django.utils.timezone.now)),
("enabled", models.BooleanField(default=False)),
(
"creator",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="enabled_states",
to=settings.AUTH_USER_MODEL,
),
),
(
"revision",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="enabled_states",
to="recipes.RecipeRevision",
),
),
],
options={"ordering": ("-created",)},
),
migrations.AddField(
model_name="reciperevision",
name="enabled_state",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="current_for_revision",
to="recipes.EnabledState",
),
),
]
license: mpl-2.0 | hash: 2401bc713daafd41216f75005b3da123 | line_mean: 32.338983 | line_max: 95 | alpha_frac: 0.475852 | ratio: 5.122396 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: normandy/recipes/exports.py | copies: 1 | size: 8717 | content:
import logging
import kinto_http
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from normandy.base.utils import ScopedSettings
APPROVE_CHANGES_FLAG = {"status": "to-sign"}
ROLLBACK_CHANGES_FLAG = {"status": "to-rollback"}
logger = logging.getLogger(__name__)
rs_settings = ScopedSettings("REMOTE_SETTINGS_")
def recipe_as_record(recipe):
"""
Transform a recipe to a dict with the minimum amount of fields needed for clients
to verify and execute recipes.
:param recipe: a recipe ready to be exported.
:returns: a dict to be posted on Remote Settings.
"""
from normandy.recipes.api.v1.serializers import (
MinimalRecipeSerializer,
SignatureSerializer,
) # avoid circular imports
record = {
"id": str(recipe.id),
"recipe": MinimalRecipeSerializer(recipe).data,
"signature": SignatureSerializer(recipe.signature).data,
}
return record
class RemoteSettings:
"""
Interacts with a RemoteSettings service.
Recipes get published as records in one or both of the dedicated
collections on Remote Settings. When disabled, those records are removed.
Since Normandy already has the required approval/signoff features, the integration
bypasses that of Remote Settings (leveraging a specific server configuration for this
particular collection).
There are two collections used. One is the "baseline" collection, which
is used only for recipes that fit within the baseline capabilities, and
are therefore compatible with a broad range of clients. The second is the
"capabilities" collection, in which all recipes are published. Clients
that read from the capabilities collection are expected to process
capabilities and only execute compatible recipes.
.. notes::
Remote Settings signoff workflow relies on several buckets (see kinto-signer API).
The ``main-workspace`` is only readable and writable by authorized accounts.
The ``main`` bucket is read-only, but publicly readable. The Remote Settings
clients pull data from there.
Since the review step is disabled for Normandy, publishing data is done in two steps:
1. Create, update or delete records in the ``main-workspace`` bucket
2. Approve the changes by flipping the ``status`` field to ``to-sign``
in the collection metadata
3. The server will sign and publish the new data to the ``main`` bucket.
"""
def __init__(self):
# Kinto is the underlying implementation of Remote Settings. The client
# is basically a tiny abstraction on top of the requests library.
self.client = (
kinto_http.Client(
server_url=rs_settings.URL,
auth=(rs_settings.USERNAME, rs_settings.PASSWORD),
retry=rs_settings.RETRY_REQUESTS,
)
if rs_settings.URL
else None
)
def check_config(self):
"""
Verify that integration with Remote Settings is configured properly.
"""
if self.client is None:
return # no check if disabled.
required_keys = [
"CAPABILITIES_COLLECTION_ID",
"WORKSPACE_BUCKET_ID",
"PUBLISH_BUCKET_ID",
"USERNAME",
"PASSWORD",
]
for key in required_keys:
if not getattr(settings, f"REMOTE_SETTINGS_{key}"):
msg = f"set settings.REMOTE_SETTINGS_{key} to use Remote Settings integration"
raise ImproperlyConfigured(msg)
# Test authentication.
server_info = self.client.server_info()
is_authenticated = (
"user" in server_info and rs_settings.USERNAME in server_info["user"]["id"]
)
if not is_authenticated:
raise ImproperlyConfigured("Invalid Remote Settings credentials")
# Test that collection is writable.
bucket = rs_settings.WORKSPACE_BUCKET_ID
collection = rs_settings.CAPABILITIES_COLLECTION_ID
metadata = self.client.get_collection(id=collection, bucket=bucket)
if server_info["user"]["id"] not in metadata["permissions"].get("write", []):
raise ImproperlyConfigured(
f"Remote Settings collection {collection} is not writable in bucket {bucket}."
)
# Test that collection has the proper review settings.
capabilities = server_info["capabilities"]
if "signer" in capabilities:
signer_config = capabilities["signer"]
normandy_resource = [
r
for r in signer_config["resources"]
if r["source"]["bucket"] == bucket and r["source"]["collection"] == collection
]
review_disabled = len(normandy_resource) == 1 and not normandy_resource[0].get(
"to_review_enabled", signer_config["to_review_enabled"]
)
if not review_disabled:
raise ImproperlyConfigured(
f"Review was not disabled on Remote Settings collection {collection}."
)
def published_recipes(self):
"""
Return the current list of remote records.
"""
if self.client is None:
raise ImproperlyConfigured("Remote Settings is not enabled.")
capabilities_records = self.client.get_records(
bucket=rs_settings.PUBLISH_BUCKET_ID, collection=rs_settings.CAPABILITIES_COLLECTION_ID
)
return capabilities_records
def publish(self, recipe, approve_changes=True):
"""
Publish the specified `recipe` on the remote server by upserting a record.
"""
if self.client is None:
return # no-op if disabled.
# 1. Put the record.
record = recipe_as_record(recipe)
self.client.update_record(
data=record,
bucket=rs_settings.WORKSPACE_BUCKET_ID,
collection=rs_settings.CAPABILITIES_COLLECTION_ID,
)
# 2. Approve the changes immediately (multi-signoff is disabled).
log_action = "Batch published"
if approve_changes:
self.approve_changes()
log_action = "Published"
logger.info(
f"{log_action} record '{recipe.id}' for recipe {recipe.approved_revision.name!r}"
)
def unpublish(self, recipe, approve_changes=True):
"""
Unpublish the specified `recipe` by deleting its associated record on the remote server.
"""
if self.client is None:
return # no-op if disabled.
# 1. Delete the record
either_existed = False
try:
self.client.delete_record(
id=str(recipe.id),
bucket=rs_settings.WORKSPACE_BUCKET_ID,
collection=rs_settings.CAPABILITIES_COLLECTION_ID,
)
either_existed = True
except kinto_http.KintoException as e:
if e.response.status_code == 404:
logger.warning(
f"The recipe '{recipe.id}' was not published in the capabilities collection. Skip."
)
else:
raise
# 2. Approve the changes immediately (multi-signoff is disabled).
log_action = "Batch deleted"
if either_existed and approve_changes:
self.approve_changes()
log_action = "Deleted"
logger.info(
f"{log_action} record '{recipe.id}' of recipe {recipe.approved_revision.name!r}"
)
def approve_changes(self):
"""
Approve the changes made in the workspace collection.
.. note::
This only works because multi-signoff is disabled for the Normandy recipes
in configuration (see :ref:`remote-settings-install`)
"""
if self.client is None:
return # no-op if disabled.
try:
self.client.patch_collection(
id=rs_settings.CAPABILITIES_COLLECTION_ID,
data=APPROVE_CHANGES_FLAG,
bucket=rs_settings.WORKSPACE_BUCKET_ID,
)
logger.info("Changes were approved.")
except kinto_http.exceptions.KintoException:
# Approval failed unexpectedly.
# The changes in the `main-workspace` bucket must be reverted.
self.client.patch_collection(
id=rs_settings.CAPABILITIES_COLLECTION_ID,
data=ROLLBACK_CHANGES_FLAG,
bucket=rs_settings.WORKSPACE_BUCKET_ID,
)
raise
license: mpl-2.0 | hash: 34b143a423a75644a2ca83f9b7e6a801 | line_mean: 35.780591 | line_max: 103 | alpha_frac: 0.614087 | ratio: 4.571054 | autogenerated: false | config_or_test: true | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: normandy/recipes/migrations/0009_auto_20180510_2328.py | copies: 1 | size: 1037 | content:
# Generated by Django 2.0.5 on 2018-05-10 23:28
from django.db import migrations
def enabled_to_enabled_state(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
EnabledState = apps.get_model("recipes", "EnabledState")
for recipe in Recipe.objects.filter(enabled=True):
if recipe.approved_revision:
es = EnabledState.objects.create(revision=recipe.approved_revision, enabled=True)
es.current_for_revision.add(recipe.approved_revision)
def enabled_state_to_enabled(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
for recipe in Recipe.objects.exclude(approved_revision=None):
enabled_state = recipe.approved_revision.enabled_state
if enabled_state and enabled_state.enabled:
recipe.enabled = True
recipe.save()
class Migration(migrations.Migration):
dependencies = [("recipes", "0008_auto_20180510_2252")]
operations = [migrations.RunPython(enabled_to_enabled_state, enabled_state_to_enabled)]
license: mpl-2.0 | hash: c93381001079413db6083e24aca3a9d3 | line_mean: 33.566667 | line_max: 93 | alpha_frac: 0.702025 | ratio: 3.743682 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: normandy/studies/tests/__init__.py | copies: 1 | size: 4038 | content:
import factory
import json
import tempfile
import zipfile
from factory.django import DjangoModelFactory
from faker import Faker
from normandy.base.tests import FuzzyUnicode
from normandy.studies.models import Extension
INSTALL_RDF_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:type>2</em:type>
<em:bootstrap>true</em:bootstrap>
<em:unpack>false</em:unpack>
<em:multiprocessCompatible>true</em:multiprocessCompatible>
{}
<em:targetApplication>
<Description>
<em:id>{{ec8030f7-c20a-464f-9b0e-13a3a9e97384}}</em:id>
<em:minVersion>52.0</em:minVersion>
<em:maxVersion>*</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
"""
class XPIFileFactory(object):
def __init__(self, signed=True):
# Generate a unique random path for the new XPI file
f, self._path = tempfile.mkstemp(suffix=".xpi")
# Create a blank zip file on disk
zf = zipfile.ZipFile(self.path, mode="w")
zf.close()
if signed:
self.add_file("META-INF/manifest.mf", b"")
self.add_file("META-INF/mozilla.rsa", b"")
self.add_file("META-INF/mozilla.sf", b"")
@property
def path(self):
return self._path
def add_file(self, filename, data):
with zipfile.ZipFile(self.path, mode="a") as zf:
with zf.open(filename, mode="w") as f:
f.write(data)
def open(self, mode="rb"):
return open(self.path, mode=mode)
class WebExtensionFileFactory(XPIFileFactory):
def __init__(self, signed=True, from_file=None, gecko_id=None, overwrite_data=None):
super().__init__(signed=signed)
if not gecko_id:
gecko_id = f"{Faker().md5()}@normandy.mozilla.org"
if from_file:
self._manifest = json.load(from_file)
else:
self._manifest = {
"manifest_version": 2,
"name": "normandy test addon",
"version": "0.1",
"description": "This is an add-on for us in Normandy's tests",
"applications": {"gecko": {"id": gecko_id}},
}
if overwrite_data:
self._manifest.update(overwrite_data)
self.save_manifest()
@property
def manifest(self):
return self._manifest
def save_manifest(self):
self.add_file("manifest.json", json.dumps(self.manifest).encode())
def update_manifest(self, data):
self._manifest.update(data)
self.save_manifest()
def replace_manifest(self, data):
self._manifest = data
self.save_manifest()
class LegacyAddonFileFactory(XPIFileFactory):
def __init__(self, signed=True, from_file=None, addon_id=None, overwrite_data=None):
super().__init__(signed=signed)
if not addon_id:
name = Faker().md5()
addon_id = f"{name}@normandy.mozilla.org"
if from_file:
with open(from_file, "rb") as f:
self.add_file("install.rdf", f.read())
else:
data = {
"id": addon_id,
"version": "0.1",
"name": "Signed Bootstrap Mozilla Extension Example",
"description": "Example of a bootstrapped addon",
}
if overwrite_data:
data.update(overwrite_data)
self.generate_install_rdf(data)
def generate_install_rdf(self, data):
insert = ""
for k in data:
insert += "<em:{}>{}</em:{}>\n".format(k, data[k], k)
self.add_file("install.rdf", INSTALL_RDF_TEMPLATE.format(insert).encode())
class ExtensionFactory(DjangoModelFactory):
name = FuzzyUnicode()
xpi = factory.django.FileField(from_func=lambda: WebExtensionFileFactory().open())
class Meta:
model = Extension
license: mpl-2.0 | hash: 193a501d87316d4193e4f9305219f558 | line_mean: 28.911111 | line_max: 96 | alpha_frac: 0.586924 | ratio: 3.628032 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: normandy/recipes/management/commands/initial_data.py | copies: 1 | size: 1928 | content:
from django.core.management.base import BaseCommand
from django_countries import countries
from normandy.recipes.models import Channel, Country, WindowsVersion
class Command(BaseCommand):
"""
Adds some helpful initial data to the site's database. If matching
data already exists, it should _not_ be overwritten, making this
safe to run multiple times.
This exists instead of data migrations so that test runs do not load
this data into the test database.
If this file grows too big, we should consider finding a library or
coming up with a more robust way of adding this data.
"""
help = "Adds initial data to database"
def handle(self, *args, **options):
self.add_release_channels()
self.add_countries()
self.add_windows_versions()
def add_release_channels(self):
self.stdout.write("Adding Release Channels...", ending="")
channels = {
"release": "Release",
"beta": "Beta",
"aurora": "Developer Edition",
"nightly": "Nightly",
}
for slug, name in channels.items():
Channel.objects.update_or_create(slug=slug, defaults={"name": name})
self.stdout.write("Done")
def add_countries(self):
self.stdout.write("Adding Countries...", ending="")
for code, name in countries:
Country.objects.update_or_create(code=code, defaults={"name": name})
self.stdout.write("Done")
def add_windows_versions(self):
self.stdout.write("Adding Windows Versions...", ending="")
versions = [
(6.1, "Windows 7"),
(6.2, "Windows 8"),
(6.3, "Windows 8.1"),
(10.0, "Windows 10"),
]
for nt_version, name in versions:
WindowsVersion.objects.update_or_create(nt_version=nt_version, defaults={"name": name})
self.stdout.write("Done")
license: mpl-2.0 | hash: 38c6b007ae090d295399b47eca56512a | line_mean: 32.824561 | line_max: 99 | alpha_frac: 0.61722 | ratio: 4.17316 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: contract-tests/v1_api/test_performance.py | copies: 1 | size: 3083 | content:
from urllib.parse import urljoin
import html5lib
import pytest
"""These are paths hit by self repair that need to be very fast"""
HOT_PATHS = [
"/en-US/repair",
"/en-US/repair/",
"/api/v1/recipe/?enabled=1",
"/api/v1/recipe/signed/?enabled=1",
"/api/v1/action/",
]
@pytest.mark.parametrize("path", HOT_PATHS)
class TestHotPaths(object):
"""
Test for performance-enhancing properties of the site.
This file does not test performance by measuring runtimes and throughput.
Instead it tests for markers of features that would speed up or slow down the
site, such as cache headers.
"""
def test_no_redirects(self, conf, requests_session, path):
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
assert 200 <= r.status_code < 300
def test_no_vary_cookie(self, conf, requests_session, path, only_readonly):
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
assert "cookie" not in r.headers.get("vary", "").lower()
def test_cache_headers(self, conf, requests_session, path, only_readonly):
if path.startswith("/api/"):
pytest.xfail("caching temporarily hidden on api by nginx")
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
cache_control = r.headers.get("cache-control")
assert cache_control is not None
# parse cache-control header.
parts = [part.strip() for part in cache_control.split(",")]
max_age = [part for part in parts if part.startswith("max-age=")][0]
max_age_seconds = int(max_age.split("=")[1])
assert "public" in parts
assert max_age_seconds > 0
def test_static_cache_headers(conf, requests_session):
"""Test that all scripts included from self-repair have long lived cache headers"""
req = requests_session.get(conf.getoption("server") + "/en-US/repair")
req.raise_for_status()
document = html5lib.parse(req.content, treebuilder="dom")
scripts = document.getElementsByTagName("script")
for script in scripts:
src = script.getAttribute("src")
url = urljoin(conf.getoption("server"), src)
script_req = requests_session.get(url)
script_req.raise_for_status()
cache_control = parse_cache_control(script_req.headers["cache-control"])
assert cache_control["public"], f"Cache-control: public for {url}"
ONE_YEAR = 31_536_000
assert cache_control["max-age"] >= ONE_YEAR, f"Cache-control: max-age > 1 year for {url}"
assert cache_control["immutable"], f"Cache-control: immutable for {url}"
def parse_cache_control(header):
parsed = {}
parts = header.split(",")
for part in parts:
part = part.strip()
if "=" in part:
key, val = part.split("=", 1)
try:
val = int(val)
except ValueError:
pass
parsed[key] = val
else:
parsed[part] = True
return parsed
license: mpl-2.0 | hash: 5cebd95963e1d0f119d1be4d966f587c | line_mean: 35.270588 | line_max: 97 | alpha_frac: 0.626014 | ratio: 3.759756 | autogenerated: false | config_or_test: true | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: normandy/recipes/migrations/0004_auto_20180502_2340.py | copies: 1 | size: 5164 | content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-02 23:40
# flake8: noqa
from __future__ import unicode_literals
import hashlib
from django.db import migrations
def create_tmp_from_revision(apps, revision, parent=None):
ApprovalRequest = apps.get_model("recipes", "ApprovalRequest")
TmpRecipeRevision = apps.get_model("recipes", "TmpRecipeRevision")
tmp = TmpRecipeRevision(
created=revision.created,
updated=revision.updated,
comment=revision.comment,
name=revision.name,
arguments_json=revision.arguments_json,
extra_filter_expression=revision.extra_filter_expression,
identicon_seed=revision.identicon_seed,
action=revision.action,
parent=parent,
recipe=revision.recipe,
user=revision.user,
)
tmp.save()
if revision.approved_for_recipe.count():
tmp.approved_for_recipe.add(revision.approved_for_recipe.get())
if revision.latest_for_recipe.count():
tmp.latest_for_recipe.add(revision.latest_for_recipe.get())
try:
approval_request = revision.approval_request
approval_request.tmp_revision = tmp
approval_request.save()
except ApprovalRequest.DoesNotExist:
pass
for channel in revision.channels.all():
tmp.channels.add(channel)
for country in revision.countries.all():
tmp.countries.add(country)
for locale in revision.locales.all():
tmp.locales.add(locale)
return tmp
def copy_revisions_to_tmp(apps, schema_editor):
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
for revision in RecipeRevision.objects.filter(parent=None):
current_rev = revision
parent_tmp = create_tmp_from_revision(apps, current_rev)
try:
while current_rev.child:
parent_tmp = create_tmp_from_revision(apps, current_rev.child, parent=parent_tmp)
current_rev = current_rev.child
except RecipeRevision.DoesNotExist:
pass
def get_filter_expression(revision):
parts = []
if revision.locales.count():
locales = ", ".join(["'{}'".format(l.code) for l in revision.locales.all()])
parts.append("normandy.locale in [{}]".format(locales))
if revision.countries.count():
countries = ", ".join(["'{}'".format(c.code) for c in revision.countries.all()])
parts.append("normandy.country in [{}]".format(countries))
if revision.channels.count():
channels = ", ".join(["'{}'".format(c.slug) for c in revision.channels.all()])
parts.append("normandy.channel in [{}]".format(channels))
if revision.extra_filter_expression:
parts.append(revision.extra_filter_expression)
expression = ") && (".join(parts)
return "({})".format(expression) if len(parts) > 1 else expression
def hash(revision):
data = "{}{}{}{}{}{}".format(
revision.recipe.id,
revision.created,
revision.name,
revision.action.id,
revision.arguments_json,
get_filter_expression(revision),
)
return hashlib.sha256(data.encode()).hexdigest()
def create_revision_from_tmp(apps, tmp, parent=None):
ApprovalRequest = apps.get_model("recipes", "ApprovalRequest")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
rev = RecipeRevision(
created=tmp.created,
updated=tmp.updated,
comment=tmp.comment,
name=tmp.name,
arguments_json=tmp.arguments_json,
extra_filter_expression=tmp.extra_filter_expression,
identicon_seed=tmp.identicon_seed,
action=tmp.action,
parent=parent,
recipe=tmp.recipe,
user=tmp.user,
)
initial_id = hash(tmp)
rev.id = initial_id
rev.save()
if tmp.approved_for_recipe.count():
rev.approved_for_recipe.add(tmp.approved_for_recipe.get())
if tmp.latest_for_recipe.count():
rev.latest_for_recipe.add(tmp.latest_for_recipe.get())
try:
approval_request = tmp.approval_request
approval_request.revision = rev
approval_request.save()
except ApprovalRequest.DoesNotExist:
pass
for channel in tmp.channels.all():
rev.channels.add(channel)
for country in tmp.countries.all():
rev.countries.add(country)
for locale in tmp.locales.all():
rev.locales.add(locale)
return rev
def copy_tmp_to_revisions(apps, schema_editor):
TmpRecipeRevision = apps.get_model("recipes", "TmpRecipeRevision")
for tmp in TmpRecipeRevision.objects.filter(parent=None):
current_tmp = tmp
parent_rev = create_revision_from_tmp(apps, current_tmp)
try:
while current_tmp.child:
parent_rev = create_revision_from_tmp(apps, current_tmp.child, parent=parent_rev)
current_tmp = current_tmp.child
except TmpRecipeRevision.DoesNotExist:
pass
class Migration(migrations.Migration):
dependencies = [("recipes", "0003_tmpreciperevision")]
operations = [migrations.RunPython(copy_revisions_to_tmp, copy_tmp_to_revisions)]
license: mpl-2.0 | hash: b08fd3e120b5fcd53bc0145f4b760be4 | line_mean: 28.849711 | line_max: 97 | alpha_frac: 0.647366 | ratio: 3.819527 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: normandy/recipes/api/v3/serializers.py | copies: 1 | size: 11345 | content:
from rest_framework import serializers
from factory.fuzzy import FuzzyText
from normandy.base.api.v3.serializers import UserSerializer
from normandy.base.jexl import get_normandy_jexl
from normandy.recipes import filters
from normandy.recipes.api.fields import (
ActionImplementationHyperlinkField,
FilterObjectField,
)
from normandy.recipes.models import (
Action,
ApprovalRequest,
EnabledState,
Recipe,
RecipeRevision,
Signature,
)
from normandy.recipes.validators import JSONSchemaValidator
class CustomizableSerializerMixin:
"""Serializer Mixin that allows callers to exclude fields on instance of this serializer."""
def __init__(self, *args, **kwargs):
exclude_fields = kwargs.pop("exclude_fields", [])
super().__init__(*args, **kwargs)
if exclude_fields:
for field in exclude_fields:
self.fields.pop(field)
class ActionSerializer(serializers.ModelSerializer):
arguments_schema = serializers.JSONField()
implementation_url = ActionImplementationHyperlinkField()
class Meta:
model = Action
fields = ["arguments_schema", "name", "id", "implementation_url"]
class ApprovalRequestSerializer(serializers.ModelSerializer):
approver = UserSerializer()
created = serializers.DateTimeField(read_only=True)
creator = UserSerializer()
revision = serializers.SerializerMethodField(read_only=True)
class Meta:
model = ApprovalRequest
fields = ["approved", "approver", "comment", "created", "creator", "id", "revision"]
def get_revision(self, instance):
serializer = RecipeRevisionLinkSerializer(instance.revision)
return serializer.data
class EnabledStateSerializer(CustomizableSerializerMixin, serializers.ModelSerializer):
creator = UserSerializer()
class Meta:
model = EnabledState
fields = ["id", "revision_id", "created", "creator", "enabled", "carryover_from"]
class RecipeRevisionSerializer(serializers.ModelSerializer):
action = serializers.SerializerMethodField(read_only=True)
approval_request = ApprovalRequestSerializer(read_only=True)
capabilities = serializers.ListField(read_only=True)
comment = serializers.CharField(required=False)
creator = UserSerializer(source="user", read_only=True)
date_created = serializers.DateTimeField(source="created", read_only=True)
enabled_states = EnabledStateSerializer(many=True, exclude_fields=["revision_id"])
filter_object = serializers.ListField(child=FilterObjectField())
recipe = serializers.SerializerMethodField(read_only=True)
class Meta:
model = RecipeRevision
fields = [
"action",
"approval_request",
"arguments",
"experimenter_slug",
"capabilities",
"comment",
"creator",
"date_created",
"enabled_states",
"enabled",
"extra_capabilities",
"extra_filter_expression",
"filter_expression",
"filter_object",
"id",
"identicon_seed",
"metadata",
"name",
"recipe",
"updated",
]
def get_recipe(self, instance):
serializer = RecipeLinkSerializer(instance.recipe)
return serializer.data
def get_action(self, instance):
serializer = ActionSerializer(
instance.action, read_only=True, context={"request": self.context.get("request")}
)
return serializer.data
class SignatureSerializer(serializers.ModelSerializer):
timestamp = serializers.DateTimeField(read_only=True)
signature = serializers.ReadOnlyField()
x5u = serializers.ReadOnlyField()
public_key = serializers.ReadOnlyField()
class Meta:
model = Signature
fields = ["timestamp", "signature", "x5u", "public_key"]
class RecipeSerializer(CustomizableSerializerMixin, serializers.ModelSerializer):
# read-only fields
approved_revision = RecipeRevisionSerializer(read_only=True)
latest_revision = RecipeRevisionSerializer(read_only=True)
signature = SignatureSerializer(read_only=True)
uses_only_baseline_capabilities = serializers.BooleanField(
source="latest_revision.uses_only_baseline_capabilities", read_only=True
)
# write-only fields
action_id = serializers.PrimaryKeyRelatedField(
source="action", queryset=Action.objects.all(), write_only=True
)
arguments = serializers.JSONField(write_only=True)
extra_filter_expression = serializers.CharField(
required=False, allow_blank=True, write_only=True
)
filter_object = serializers.ListField(
child=FilterObjectField(), required=False, write_only=True
)
name = serializers.CharField(write_only=True)
identicon_seed = serializers.CharField(required=False, write_only=True)
comment = serializers.CharField(required=False, write_only=True)
experimenter_slug = serializers.CharField(
required=False, write_only=True, allow_null=True, allow_blank=True
)
extra_capabilities = serializers.ListField(required=False, write_only=True)
class Meta:
model = Recipe
fields = [
# read-only
"approved_revision",
"id",
"latest_revision",
"signature",
"uses_only_baseline_capabilities",
# write-only
"action_id",
"arguments",
"extra_filter_expression",
"filter_object",
"name",
"identicon_seed",
"comment",
"experimenter_slug",
"extra_capabilities",
]
def get_action(self, instance):
serializer = ActionSerializer(
instance.latest_revision.action,
read_only=True,
context={"request": self.context.get("request")},
)
return serializer.data
def update(self, instance, validated_data):
request = self.context.get("request")
if request and request.user:
validated_data["user"] = request.user
instance.revise(**validated_data)
return instance
def create(self, validated_data):
request = self.context.get("request")
if request and request.user:
validated_data["user"] = request.user
if "identicon_seed" not in validated_data:
validated_data["identicon_seed"] = f"v1:{FuzzyText().fuzz()}"
recipe = Recipe.objects.create()
return self.update(recipe, validated_data)
def validate_extra_filter_expression(self, value):
if value:
jexl = get_normandy_jexl()
errors = list(jexl.validate(value))
if errors:
raise serializers.ValidationError(errors)
return value
def validate(self, data):
data = super().validate(data)
action = data.get("action")
if action is None:
action = self.instance.latest_revision.action
arguments = data.get("arguments")
if arguments is not None:
# Ensure the value is a dict
if not isinstance(arguments, dict):
raise serializers.ValidationError({"arguments": "Must be an object."})
# Get the schema associated with the selected action
schema = action.arguments_schema
schemaValidator = JSONSchemaValidator(schema)
errorResponse = {}
errors = sorted(schemaValidator.iter_errors(arguments), key=lambda e: e.path)
# Loop through ValidationErrors returned by JSONSchema
# Each error contains a message and a path attribute
# message: string human-readable error explanation
# path: list containing path to offending element
for error in errors:
currentLevel = errorResponse
# Loop through the path of the current error
# e.g. ['surveys'][0]['weight']
for index, path in enumerate(error.path):
# If this key already exists in our error response, step into it
if path in currentLevel:
currentLevel = currentLevel[path]
continue
else:
# If we haven't reached the end of the path, add this path
# as a key in our error response object and step into it
if index < len(error.path) - 1:
currentLevel[path] = {}
currentLevel = currentLevel[path]
continue
# If we've reached the final path, set the error message
else:
currentLevel[path] = error.message
if errorResponse:
raise serializers.ValidationError({"arguments": errorResponse})
if self.instance is None:
if data.get("extra_filter_expression", "").strip() == "":
if not data.get("filter_object"):
raise serializers.ValidationError(
"one of extra_filter_expression or filter_object is required"
)
else:
if "extra_filter_expression" in data or "filter_object" in data:
# If either is attempted to be updated, at least one of them must be truthy.
if not data.get("extra_filter_expression", "").strip() and not data.get(
"filter_object"
):
raise serializers.ValidationError(
"if extra_filter_expression is blank, "
"at least one filter_object is required"
)
return data
def validate_filter_object(self, value):
if not isinstance(value, list):
raise serializers.ValidationError(
{"non field errors": ["filter_object must be a list."]}
)
errors = {}
for i, obj in enumerate(value):
if not isinstance(obj, dict):
errors[i] = {"non field errors": ["filter_object members must be objects."]}
continue
if "type" not in obj:
errors[i] = {"type": ["This field is required."]}
break
Filter = filters.by_type.get(obj["type"])
if Filter is not None:
filter = Filter(data=obj)
if not filter.is_valid():
errors[i] = filter.errors
else:
errors[i] = {"type": [f'Unknown filter object type "{obj["type"]}".']}
if errors:
raise serializers.ValidationError(errors)
return value
class RecipeLinkSerializer(RecipeSerializer):
class Meta(RecipeSerializer.Meta):
fields = ["approved_revision_id", "id", "latest_revision_id"]
class RecipeRevisionLinkSerializer(RecipeRevisionSerializer):
recipe_id = serializers.SerializerMethodField(read_only=True)
class Meta(RecipeSerializer.Meta):
fields = ["id", "recipe_id"]
def get_recipe_id(self, instance):
return instance.recipe.id
license: mpl-2.0 | hash: 9e69db44885b75be18a03b20bf883646 | line_mean: 34.676101 | line_max: 96 | alpha_frac: 0.606435 | ratio: 4.634395 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false

repo_name: mozilla/normandy | path: contract-tests/v3_api/test_group_delete.py | copies: 1 | size: 1231 | content:
import uuid
from support.assertions import assert_valid_schema
from urllib.parse import urljoin
def test_group_delete(conf, requests_session, headers):
# Create a new group
data = {"name": str(uuid.uuid4())}
response = requests_session.post(
urljoin(conf.getoption("server"), "/api/v3/group/"), headers=headers, data=data
)
assert response.status_code == 201
assert_valid_schema(response.json())
group_data = response.json()
group_id = group_data["id"]
# Verify group was stored and contains expected data
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
group_data = response.json()
assert response.status_code == 200
assert_valid_schema(response.json())
# Delete the group
response = requests_session.delete(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
assert response.status_code == 204
# Verify that it no longer exists
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
assert response.status_code == 404
license: mpl-2.0 | hash: 709834fd8468eb48cb136525254f26bc | line_mean: 33.194444 | line_max: 96 | alpha_frac: 0.670187 | ratio: 3.799383 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false

repo_name: developmentseed/landsat-util | path: docs/conf.py | copies: 9 | size: 9890 | content:
# -*- coding: utf-8 -*-
#
# Landsat-util documentation build configuration file, created by
# sphinx-quickstart on Thu May 28 17:52:10 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['numpy', 'rasterio', 'scipy', 'scikit-image', 'homura', 'boto',
'termcolor', 'requests', 'python-dateutil']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
print(project_root)
import landsat
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'landsat-util'
copyright = u'2015, Development Seed'
author = u'Development Seed'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = landsat.__version__
# The full version, including alpha/beta/rc tags.
release = landsat.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Landsat-utildoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Landsat-util.tex', u'Landsat-util Documentation',
u'Development Seed', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'landsat-util', u'Landsat-util Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Landsat-util', u'Landsat-util Documentation',
author, 'Landsat-util', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
license: cc0-1.0 | hash: 299f01338ac03d06f9f64eefa0b432b3 | line_mean: 30.800643 | line_max: 79 | alpha_frac: 0.705359 | ratio: 3.653491 | autogenerated: false | config_or_test: true | has_no_keywords: false | has_few_assignments: false

repo_name: rmmh/skybot | path: plugins/lastfm.py | copies: 3 | size: 2391 | content:
"""
The Last.fm API key is retrieved from the bot config file.
"""
from util import hook, http
api_url = "http://ws.audioscrobbler.com/2.0/?format=json"
@hook.api_key("lastfm")
@hook.command(autohelp=False)
def lastfm(inp, chan="", nick="", reply=None, api_key=None, db=None):
".lastfm <username> [dontsave] | @<nick> -- gets current or last played " "track from lastfm"
db.execute(
"create table if not exists "
"lastfm(chan, nick, user, primary key(chan, nick))"
)
if inp[0:1] == "@":
nick = inp[1:].strip()
user = None
dontsave = True
else:
user = inp
dontsave = user.endswith(" dontsave")
if dontsave:
user = user[:-9].strip().lower()
if not user:
user = db.execute(
"select user from lastfm where chan=? and nick=lower(?)", (chan, nick)
).fetchone()
if not user:
return lastfm.__doc__
user = user[0]
response = http.get_json(
api_url, method="user.getrecenttracks", api_key=api_key, user=user, limit=1
)
if "error" in response:
return "error: %s" % response["message"]
if (
not "track" in response["recenttracks"]
or len(response["recenttracks"]["track"]) == 0
):
return "no recent tracks for user \x02%s\x0F found" % user
tracks = response["recenttracks"]["track"]
if type(tracks) == list:
# if the user is listening to something, the tracks entry is a list
# the first item is the current track
track = tracks[0]
status = "current track"
elif type(tracks) == dict:
# otherwise, they aren't listening to anything right now, and
# the tracks entry is a dict representing the most recent track
track = tracks
status = "last track"
else:
return "error parsing track listing"
title = track["name"]
album = track["album"]["#text"]
artist = track["artist"]["#text"]
ret = "\x02%s\x0F's %s - \x02%s\x0f" % (user, status, title)
if artist:
ret += " by \x02%s\x0f" % artist
if album:
ret += " on \x02%s\x0f" % album
reply(ret)
if inp and not dontsave:
db.execute(
"insert or replace into lastfm(chan, nick, user) " "values (?, ?, ?)",
(chan, nick.lower(), inp),
)
db.commit()
license: unlicense | hash: 636c141db11c52dd6b085daa4d1fa441 | line_mean: 27.129412 | line_max: 97 | alpha_frac: 0.563363 | ratio: 3.531758 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false

repo_name: rmmh/skybot | path: plugins/google.py | copies: 2 | size: 1308 | content:
from __future__ import unicode_literals
import random
from util import hook, http
def api_get(query, key, is_image=None, num=1):
url = (
"https://www.googleapis.com/customsearch/v1?cx=007629729846476161907:ud5nlxktgcw"
"&fields=items(title,link,snippet)&safe=off&nfpr=1"
+ ("&searchType=image" if is_image else "")
)
return http.get_json(url, key=key, q=query, num=num)
@hook.api_key("google")
@hook.command("can i get a picture of")
@hook.command("can you grab me a picture of")
@hook.command("give me a print out of")
@hook.command
def gis(inp, api_key=None):
""".gis <term> -- finds an image using google images (safesearch off)"""
parsed = api_get(inp, api_key, is_image=True, num=10)
if "items" not in parsed:
return "no images found"
return random.choice(parsed["items"])["link"]
@hook.api_key("google")
@hook.command("g")
@hook.command
def google(inp, api_key=None):
""".g/.google <query> -- returns first google search result"""
parsed = api_get(inp, api_key)
if "items" not in parsed:
return "no results found"
out = '{link} -- \x02{title}\x02: "{snippet}"'.format(**parsed["items"][0])
out = " ".join(out.split())
if len(out) > 300:
out = out[: out.rfind(" ")] + '..."'
return out
license: unlicense | hash: f118032a0344f7392e8812fe3793d67c | line_mean: 26.829787 | line_max: 89 | alpha_frac: 0.626147 | ratio: 3.056075 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false

repo_name: rmmh/skybot | path: plugins/util/timesince.py | copies: 3 | size: 4139 | content:
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, ("year", "years")),
(60 * 60 * 24 * 30, ("month", "months")),
(60 * 60 * 24 * 7, ("week", "weeks")),
(60 * 60 * 24, ("day", "days")),
(60 * 60, ("hour", "hours")),
(60, ("minute", "minutes")),
)
# Convert int or float (unix epoch) to datetime.datetime for comparison
if isinstance(d, int) or isinstance(d, float):
d = datetime.datetime.fromtimestamp(d)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
now = datetime.datetime.now()
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return "0 " + "minutes"
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
if count == 1:
s = "%(number)d %(type)s" % {"number": count, "type": name[0]}
else:
s = "%(number)d %(type)s" % {"number": count, "type": name[1]}
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
if count2 == 1:
s += ", %d %s" % (count2, name2[0])
else:
s += ", %d %s" % (count2, name2[1])
return s
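# Illustrative check of the two-unit formatting described in the docstring above
# (not part of the original module; the datetimes are arbitrary example values):
#
#   >>> import datetime
#   >>> timesince(datetime.datetime(2020, 1, 1), now=datetime.datetime(2021, 4, 2))
#   '1 year, 3 months'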
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
if not now:
now = datetime.datetime.now()
return timesince(now, d)
|
unlicense
|
aa53f92f5fdd6e27a86d9046dc52bf9f
| 39.578431
| 80
| 0.650882
| 3.930674
| false
| false
| false
| false
|
rmmh/skybot
|
plugins/mtg.py
|
3
|
2470
|
from __future__ import print_function
from builtins import range
from util import hook, http
import random
def card_search(name):
matching_cards = http.get_json(
"https://api.magicthegathering.io/v1/cards", name=name
)
for card in matching_cards["cards"]:
if card["name"].lower() == name.lower():
return card
return random.choice(matching_cards["cards"])
@hook.command
def mtg(inp, say=None):
""".mtg <name> - Searches for Magic the Gathering card given <name>"""
try:
card = card_search(inp)
except IndexError:
return "Card not found."
symbols = {
"{0}": "0",
"{1}": "1",
"{2}": "2",
"{3}": "3",
"{4}": "4",
"{5}": "5",
"{6}": "6",
"{7}": "7",
"{8}": "8",
"{9}": "9",
"{10}": "10",
"{11}": "11",
"{12}": "12",
"{13}": "13",
"{14}": "14",
"{15}": "15",
"{16}": "16",
"{17}": "17",
"{18}": "18",
"{19}": "19",
"{20}": "20",
"{T}": "\u27F3",
"{S}": "\u2744",
"{Q}": "\u21BA",
"{C}": "\u27E1",
"{W}": "W",
"{U}": "U",
"{B}": "B",
"{R}": "R",
"{G}": "G",
"{W/P}": "\u03D5",
"{U/P}": "\u03D5",
"{B/P}": "\u03D5",
"{R/P}": "\u03D5",
"{G/P}": "\u03D5",
"{X}": "X",
"\n": " ",
}
results = {
"name": card["name"],
"type": card["type"],
"cost": card.get("manaCost", ""),
"text": card.get("text", ""),
"power": card.get("power"),
"toughness": card.get("toughness"),
"loyalty": card.get("loyalty"),
"multiverseid": card.get("multiverseid"),
}
for fragment, rep in symbols.items():
results["text"] = results["text"].replace(fragment, rep)
results["cost"] = results["cost"].replace(fragment, rep)
template = ["{name} -"]
template.append("{type}")
template.append("- {cost} |")
if results["loyalty"]:
template.append("{loyalty} Loyalty |")
if results["power"]:
template.append("{power}/{toughness} |")
template.append(
"{text} | http://gatherer.wizards.com/Pages/Card/Details.aspx?multiverseid={multiverseid}"
)
return " ".join(template).format(**results)
if __name__ == "__main__":
print(card_search("Black Lotus"))
print(mtg("Black Lotus"))
|
unlicense
|
32c6db1674583320bea728226b9561ab
| 25
| 98
| 0.448178
| 3.130545
| false
| false
| false
| false
|
pytube/pytube
|
pytube/cipher.py
|
1
|
22529
|
"""
This module contains all logic necessary to decipher the signature.
YouTube's strategy to restrict downloading videos is to send a ciphered version
of the signature to the client, along with the decryption algorithm obfuscated
in JavaScript. For the clients to play the videos, JavaScript must take the
ciphered version, cycle it through a series of "transform functions," and then
sign the media URL with the output.
This module is responsible for (1) finding and extracting those "transform
functions," (2) mapping them to Python equivalents, and (3) taking the ciphered
signature and decoding it.
"""
import logging
import re
from itertools import chain
from typing import Any, Callable, Dict, List, Optional, Tuple
from pytube.exceptions import ExtractError, RegexMatchError
from pytube.helpers import cache, regex_search
from pytube.parser import find_object_from_startpoint, throttling_array_split
logger = logging.getLogger(__name__)
class Cipher:
def __init__(self, js: str):
self.transform_plan: List[str] = get_transform_plan(js)
var_regex = re.compile(r"^\w+\W")
var_match = var_regex.search(self.transform_plan[0])
if not var_match:
raise RegexMatchError(
caller="__init__", pattern=var_regex.pattern
)
var = var_match.group(0)[:-1]
self.transform_map = get_transform_map(js, var)
self.js_func_patterns = [
r"\w+\.(\w+)\(\w,(\d+)\)",
r"\w+\[(\"\w+\")\]\(\w,(\d+)\)"
]
self.throttling_plan = get_throttling_plan(js)
self.throttling_array = get_throttling_function_array(js)
self.calculated_n = None
def calculate_n(self, initial_n: list):
"""Converts n to the correct value to prevent throttling."""
if self.calculated_n:
return self.calculated_n
# First, update all instances of 'b' with the list(initial_n)
for i in range(len(self.throttling_array)):
if self.throttling_array[i] == 'b':
self.throttling_array[i] = initial_n
for step in self.throttling_plan:
curr_func = self.throttling_array[int(step[0])]
if not callable(curr_func):
logger.debug(f'{curr_func} is not callable.')
logger.debug(f'Throttling array:\n{self.throttling_array}\n')
raise ExtractError(f'{curr_func} is not callable.')
first_arg = self.throttling_array[int(step[1])]
if len(step) == 2:
curr_func(first_arg)
elif len(step) == 3:
second_arg = self.throttling_array[int(step[2])]
curr_func(first_arg, second_arg)
self.calculated_n = ''.join(initial_n)
return self.calculated_n
def get_signature(self, ciphered_signature: str) -> str:
"""Decipher the signature.
        Takes the ciphered signature and applies the transform functions to it.
:param str ciphered_signature:
The ciphered signature sent in the ``player_config``.
:rtype: str
:returns:
Decrypted signature required to download the media content.
"""
signature = list(ciphered_signature)
for js_func in self.transform_plan:
name, argument = self.parse_function(js_func) # type: ignore
signature = self.transform_map[name](signature, argument)
logger.debug(
"applied transform function\n"
"output: %s\n"
"js_function: %s\n"
"argument: %d\n"
"function: %s",
"".join(signature),
name,
argument,
self.transform_map[name],
)
return "".join(signature)
@cache
def parse_function(self, js_func: str) -> Tuple[str, int]:
"""Parse the Javascript transform function.
Break a JavaScript transform function down into a two element ``tuple``
containing the function name and some integer-based argument.
:param str js_func:
The JavaScript version of the transform function.
:rtype: tuple
:returns:
two element tuple containing the function name and an argument.
**Example**:
parse_function('DE.AJ(a,15)')
('AJ', 15)
"""
logger.debug("parsing transform function")
for pattern in self.js_func_patterns:
regex = re.compile(pattern)
parse_match = regex.search(js_func)
if parse_match:
fn_name, fn_arg = parse_match.groups()
return fn_name, int(fn_arg)
raise RegexMatchError(
caller="parse_function", pattern="js_func_patterns"
)
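# Illustrative usage of the Cipher class above (a sketch only; "base_js_source",
# the ciphered signature, and the initial n parameter are placeholders that must
# come from a fetched base.js/player response, which is not shown here):
#
#   cipher = Cipher(js=base_js_source)
#   signature = cipher.get_signature(ciphered_signature="...")
#   n_value = cipher.calculate_n(list(initial_n_parameter))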
def get_initial_function_name(js: str) -> str:
"""Extract the name of the function responsible for computing the signature.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
Function name from regex match
"""
function_patterns = [
r"\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # noqa: E501
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # noqa: E501
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r"\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(",
r"yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
]
logger.debug("finding initial function name")
for pattern in function_patterns:
regex = re.compile(pattern)
function_match = regex.search(js)
if function_match:
logger.debug("finished regex search, matched: %s", pattern)
return function_match.group(1)
raise RegexMatchError(
caller="get_initial_function_name", pattern="multiple"
)
def get_transform_plan(js: str) -> List[str]:
"""Extract the "transform plan".
The "transform plan" is the functions that the ciphered signature is
cycled through to obtain the actual signature.
:param str js:
The contents of the base.js asset file.
**Example**:
['DE.AJ(a,15)',
'DE.VR(a,3)',
'DE.AJ(a,51)',
'DE.VR(a,3)',
'DE.kT(a,51)',
'DE.kT(a,8)',
'DE.VR(a,3)',
'DE.kT(a,21)']
"""
name = re.escape(get_initial_function_name(js))
pattern = r"%s=function\(\w\){[a-z=\.\(\"\)]*;(.*);(?:.+)}" % name
logger.debug("getting transform plan")
return regex_search(pattern, js, group=1).split(";")
def get_transform_object(js: str, var: str) -> List[str]:
"""Extract the "transform object".
The "transform object" contains the function definitions referenced in the
"transform plan". The ``var`` argument is the obfuscated variable name
which contains these functions, for example, given the function call
``DE.AJ(a,15)`` returned by the transform plan, "DE" would be the var.
:param str js:
The contents of the base.js asset file.
:param str var:
The obfuscated variable name that stores an object with all functions
that descrambles the signature.
**Example**:
>>> get_transform_object(js, 'DE')
['AJ:function(a){a.reverse()}',
'VR:function(a,b){a.splice(0,b)}',
'kT:function(a,b){var c=a[0];a[0]=a[b%a.length];a[b]=c}']
"""
pattern = r"var %s={(.*?)};" % re.escape(var)
logger.debug("getting transform object")
regex = re.compile(pattern, flags=re.DOTALL)
transform_match = regex.search(js)
if not transform_match:
raise RegexMatchError(caller="get_transform_object", pattern=pattern)
return transform_match.group(1).replace("\n", " ").split(", ")
def get_transform_map(js: str, var: str) -> Dict:
"""Build a transform function lookup.
Build a lookup table of obfuscated JavaScript function names to the
Python equivalents.
:param str js:
The contents of the base.js asset file.
:param str var:
The obfuscated variable name that stores an object with all functions
that descrambles the signature.
"""
transform_object = get_transform_object(js, var)
mapper = {}
for obj in transform_object:
# AJ:function(a){a.reverse()} => AJ, function(a){a.reverse()}
name, function = obj.split(":", 1)
fn = map_functions(function)
mapper[name] = fn
return mapper
def get_throttling_function_name(js: str) -> str:
"""Extract the name of the function that computes the throttling parameter.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
The name of the function used to compute the throttling parameter.
"""
function_patterns = [
# https://github.com/ytdl-org/youtube-dl/issues/29326#issuecomment-865985377
# https://github.com/yt-dlp/yt-dlp/commit/48416bc4a8f1d5ff07d5977659cb8ece7640dcd8
# var Bpa = [iha];
# ...
# a.C && (b = a.get("n")) && (b = Bpa[0](b), a.set("n", b),
# Bpa.length || iha("")) }};
# In the above case, `iha` is the relevant function name
r'a\.[a-zA-Z]\s*&&\s*\([a-z]\s*=\s*a\.get\("n"\)\)\s*&&\s*'
r'\([a-z]\s*=\s*([a-zA-Z0-9$]+)(\[\d+\])?\([a-z]\)',
]
logger.debug('Finding throttling function name')
for pattern in function_patterns:
regex = re.compile(pattern)
function_match = regex.search(js)
if function_match:
logger.debug("finished regex search, matched: %s", pattern)
if len(function_match.groups()) == 1:
return function_match.group(1)
idx = function_match.group(2)
if idx:
idx = idx.strip("[]")
array = re.search(
r'var {nfunc}\s*=\s*(\[.+?\]);'.format(
nfunc=re.escape(function_match.group(1))),
js
)
if array:
array = array.group(1).strip("[]").split(",")
array = [x.strip() for x in array]
return array[int(idx)]
raise RegexMatchError(
caller="get_throttling_function_name", pattern="multiple"
)
def get_throttling_function_code(js: str) -> str:
"""Extract the raw code for the throttling function.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
        The raw code of the function used to compute the throttling parameter.
"""
# Begin by extracting the correct function name
name = re.escape(get_throttling_function_name(js))
# Identify where the function is defined
pattern_start = r"%s=function\(\w\)" % name
regex = re.compile(pattern_start)
match = regex.search(js)
# Extract the code within curly braces for the function itself, and merge any split lines
code_lines_list = find_object_from_startpoint(js, match.span()[1]).split('\n')
joined_lines = "".join(code_lines_list)
# Prepend function definition (e.g. `Dea=function(a)`)
return match.group(0) + joined_lines
def get_throttling_function_array(js: str) -> List[Any]:
"""Extract the "c" array.
:param str js:
The contents of the base.js asset file.
:returns:
The array of various integers, arrays, and functions.
"""
raw_code = get_throttling_function_code(js)
array_start = r",c=\["
array_regex = re.compile(array_start)
match = array_regex.search(raw_code)
array_raw = find_object_from_startpoint(raw_code, match.span()[1] - 1)
str_array = throttling_array_split(array_raw)
converted_array = []
for el in str_array:
try:
converted_array.append(int(el))
continue
except ValueError:
# Not an integer value.
pass
if el == 'null':
converted_array.append(None)
continue
if el.startswith('"') and el.endswith('"'):
# Convert e.g. '"abcdef"' to string without quotation marks, 'abcdef'
converted_array.append(el[1:-1])
continue
if el.startswith('function'):
mapper = (
(r"{for\(\w=\(\w%\w\.length\+\w\.length\)%\w\.length;\w--;\)\w\.unshift\(\w.pop\(\)\)}", throttling_unshift), # noqa:E501
(r"{\w\.reverse\(\)}", throttling_reverse),
(r"{\w\.push\(\w\)}", throttling_push),
(r";var\s\w=\w\[0\];\w\[0\]=\w\[\w\];\w\[\w\]=\w}", throttling_swap),
(r"case\s\d+", throttling_cipher_function),
(r"\w\.splice\(0,1,\w\.splice\(\w,1,\w\[0\]\)\[0\]\)", throttling_nested_splice), # noqa:E501
(r";\w\.splice\(\w,1\)}", js_splice),
(r"\w\.splice\(-\w\)\.reverse\(\)\.forEach\(function\(\w\){\w\.unshift\(\w\)}\)", throttling_prepend), # noqa:E501
(r"for\(var \w=\w\.length;\w;\)\w\.push\(\w\.splice\(--\w,1\)\[0\]\)}", throttling_reverse), # noqa:E501
)
found = False
for pattern, fn in mapper:
if re.search(pattern, el):
converted_array.append(fn)
found = True
if found:
continue
converted_array.append(el)
# Replace null elements with array itself
for i in range(len(converted_array)):
if converted_array[i] is None:
converted_array[i] = converted_array
return converted_array
def get_throttling_plan(js: str):
"""Extract the "throttling plan".
The "throttling plan" is a list of tuples used for calling functions
in the c array. The first element of the tuple is the index of the
function to call, and any remaining elements of the tuple are arguments
to pass to that function.
:param str js:
The contents of the base.js asset file.
:returns:
        The list of transform steps (tuples of c-array indices) used to compute the throttling parameter.
"""
raw_code = get_throttling_function_code(js)
transform_start = r"try{"
plan_regex = re.compile(transform_start)
match = plan_regex.search(raw_code)
transform_plan_raw = find_object_from_startpoint(raw_code, match.span()[1] - 1)
# Steps are either c[x](c[y]) or c[x](c[y],c[z])
step_start = r"c\[(\d+)\]\(c\[(\d+)\](,c(\[(\d+)\]))?\)"
step_regex = re.compile(step_start)
matches = step_regex.findall(transform_plan_raw)
transform_steps = []
for match in matches:
if match[4] != '':
transform_steps.append((match[0],match[1],match[4]))
else:
transform_steps.append((match[0],match[1]))
return transform_steps
def reverse(arr: List, _: Optional[Any]):
"""Reverse elements in a list.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { a.reverse() }
    This function takes an unused ``b`` argument because the transform
    functions are universally called with two arguments.
    **Example**:
    >>> reverse([1, 2, 3, 4], None)
[4, 3, 2, 1]
"""
return arr[::-1]
def splice(arr: List, b: int):
"""Add/remove items to/from a list.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { a.splice(0, b) }
**Example**:
>>> splice([1, 2, 3, 4], 2)
    [3, 4]
"""
return arr[b:]
def swap(arr: List, b: int):
"""Swap positions at b modulus the list length.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { var c=a[0];a[0]=a[b%a.length];a[b]=c }
**Example**:
>>> swap([1, 2, 3, 4], 2)
[3, 2, 1, 4]
"""
r = b % len(arr)
return list(chain([arr[r]], arr[1:r], [arr[0]], arr[r + 1 :]))
def throttling_reverse(arr: list):
"""Reverses the input list.
Needs to do an in-place reversal so that the passed list gets changed.
To accomplish this, we create a reversed copy, and then change each
    individual element.
"""
reverse_copy = arr.copy()[::-1]
for i in range(len(reverse_copy)):
arr[i] = reverse_copy[i]
def throttling_push(d: list, e: Any):
"""Pushes an element onto a list."""
d.append(e)
def throttling_mod_func(d: list, e: int):
"""Perform the modular function from the throttling array functions.
In the javascript, the modular operation is as follows:
e = (e % d.length + d.length) % d.length
We simply translate this to python here.
"""
return (e % len(d) + len(d)) % len(d)
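# Illustrative values (not from the original source): the JS-style modulus keeps
# negative indices in range, e.g.
#   >>> throttling_mod_func([1, 2, 3, 4, 5], -2)
#   3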
def throttling_unshift(d: list, e: int):
"""Rotates the elements of the list to the right.
In the javascript, the operation is as follows:
for(e=(e%d.length+d.length)%d.length;e--;)d.unshift(d.pop())
"""
e = throttling_mod_func(d, e)
new_arr = d[-e:] + d[:-e]
d.clear()
for el in new_arr:
d.append(el)
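# Illustrative values (not from the original source): rotating right by 2 mutates
# the list in place, e.g.
#   >>> d = [1, 2, 3, 4, 5]
#   >>> throttling_unshift(d, 2)
#   >>> d
#   [4, 5, 1, 2, 3]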
def throttling_cipher_function(d: list, e: str):
"""This ciphers d with e to generate a new list.
In the javascript, the operation is as follows:
var h = [A-Za-z0-9-_], f = 96; // simplified from switch-case loop
d.forEach(
function(l,m,n){
this.push(
n[m]=h[
(h.indexOf(l)-h.indexOf(this[m])+m-32+f--)%h.length
]
)
},
e.split("")
)
"""
h = list('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_')
f = 96
# by naming it "this" we can more closely reflect the js
this = list(e)
# This is so we don't run into weirdness with enumerate while
# we change the input list
copied_list = d.copy()
for m, l in enumerate(copied_list):
bracket_val = (h.index(l) - h.index(this[m]) + m - 32 + f) % len(h)
this.append(
h[bracket_val]
)
d[m] = h[bracket_val]
f -= 1
def throttling_nested_splice(d: list, e: int):
"""Nested splice function in throttling js.
In the javascript, the operation is as follows:
function(d,e){
e=(e%d.length+d.length)%d.length;
d.splice(
0,
1,
d.splice(
e,
1,
d[0]
)[0]
)
}
While testing, all this seemed to do is swap element 0 and e,
but the actual process is preserved in case there was an edge
case that was not considered.
"""
e = throttling_mod_func(d, e)
inner_splice = js_splice(
d,
e,
1,
d[0]
)
js_splice(
d,
0,
1,
inner_splice[0]
)
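# Illustrative values (not from the original source): as noted in the docstring,
# the net effect is a swap of element 0 and element e, e.g.
#   >>> d = [1, 2, 3, 4]
#   >>> throttling_nested_splice(d, 2)
#   >>> d
#   [3, 2, 1, 4]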
def throttling_prepend(d: list, e: int):
"""
In the javascript, the operation is as follows:
function(d,e){
e=(e%d.length+d.length)%d.length;
d.splice(-e).reverse().forEach(
function(f){
d.unshift(f)
}
)
}
Effectively, this moves the last e elements of d to the beginning.
"""
start_len = len(d)
# First, calculate e
e = throttling_mod_func(d, e)
# Then do the prepending
new_arr = d[-e:] + d[:-e]
# And update the input list
d.clear()
for el in new_arr:
d.append(el)
end_len = len(d)
assert start_len == end_len
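# Illustrative values (not from the original source): the last e elements move to
# the front, e.g.
#   >>> d = ["a", "b", "c", "d"]
#   >>> throttling_prepend(d, 1)
#   >>> d
#   ['d', 'a', 'b', 'c']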
def throttling_swap(d: list, e: int):
"""Swap positions of the 0'th and e'th elements in-place."""
e = throttling_mod_func(d, e)
f = d[0]
d[0] = d[e]
d[e] = f
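# Illustrative values (not from the original source):
#   >>> d = [1, 2, 3, 4]
#   >>> throttling_swap(d, 2)
#   >>> d
#   [3, 2, 1, 4]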
def js_splice(arr: list, start: int, delete_count=None, *items):
"""Implementation of javascript's splice function.
:param list arr:
Array to splice
:param int start:
Index at which to start changing the array
:param int delete_count:
Number of elements to delete from the array
:param *items:
Items to add to the array
Reference: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/splice # noqa:E501
"""
# Special conditions for start value
try:
if start > len(arr):
start = len(arr)
# If start is negative, count backwards from end
        if start < 0:
            start = max(len(arr) + start, 0)
except TypeError:
# Non-integer start values are treated as 0 in js
start = 0
# Special condition when delete_count is greater than remaining elements
if not delete_count or delete_count >= len(arr) - start:
delete_count = len(arr) - start # noqa: N806
deleted_elements = arr[start:start + delete_count]
# Splice appropriately.
new_arr = arr[:start] + list(items) + arr[start + delete_count:]
# Replace contents of input array
arr.clear()
for el in new_arr:
arr.append(el)
return deleted_elements
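# Illustrative values (not from the original source), mirroring Array.prototype.splice:
#   >>> arr = [1, 2, 3, 4, 5]
#   >>> js_splice(arr, 1, 2, "x")   # remove two elements at index 1, insert "x"
#   [2, 3]
#   >>> arr
#   [1, 'x', 4, 5]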
def map_functions(js_func: str) -> Callable:
"""For a given JavaScript transform function, return the Python equivalent.
:param str js_func:
The JavaScript version of the transform function.
"""
mapper = (
# function(a){a.reverse()}
(r"{\w\.reverse\(\)}", reverse),
# function(a,b){a.splice(0,b)}
(r"{\w\.splice\(0,\w\)}", splice),
# function(a,b){var c=a[0];a[0]=a[b%a.length];a[b]=c}
(r"{var\s\w=\w\[0\];\w\[0\]=\w\[\w\%\w.length\];\w\[\w\]=\w}", swap),
# function(a,b){var c=a[0];a[0]=a[b%a.length];a[b%a.length]=c}
(
r"{var\s\w=\w\[0\];\w\[0\]=\w\[\w\%\w.length\];\w\[\w\%\w.length\]=\w}",
swap,
),
)
for pattern, fn in mapper:
if re.search(pattern, js_func):
return fn
raise RegexMatchError(caller="map_functions", pattern="multiple")
|
unlicense
|
9409df7792d2d6d6536ad2fa31022a4e
| 31.322812
| 154
| 0.566337
| 3.357526
| false
| false
| false
| false
|
pytube/pytube
|
pytube/request.py
|
1
|
8512
|
"""Implements a simple wrapper around urlopen."""
import http.client
import json
import logging
import re
import socket
from functools import lru_cache
from urllib import parse
from urllib.error import URLError
from urllib.request import Request, urlopen
from pytube.exceptions import RegexMatchError, MaxRetriesExceeded
from pytube.helpers import regex_search
logger = logging.getLogger(__name__)
default_range_size = 9437184 # 9MB
def _execute_request(
url,
method=None,
headers=None,
data=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT
):
base_headers = {"User-Agent": "Mozilla/5.0", "accept-language": "en-US,en"}
if headers:
base_headers.update(headers)
if data:
# encode data for request
if not isinstance(data, bytes):
data = bytes(json.dumps(data), encoding="utf-8")
if url.lower().startswith("http"):
request = Request(url, headers=base_headers, method=method, data=data)
else:
raise ValueError("Invalid URL")
return urlopen(request, timeout=timeout) # nosec
def get(url, extra_headers=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Send an http GET request.
:param str url:
The URL to perform the GET request for.
:param dict extra_headers:
Extra headers to add to the request
:rtype: str
:returns:
UTF-8 encoded string of response
"""
if extra_headers is None:
extra_headers = {}
response = _execute_request(url, headers=extra_headers, timeout=timeout)
return response.read().decode("utf-8")
def post(url, extra_headers=None, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Send an http POST request.
:param str url:
The URL to perform the POST request for.
:param dict extra_headers:
Extra headers to add to the request
:param dict data:
The data to send on the POST request
:rtype: str
:returns:
UTF-8 encoded string of response
"""
# could technically be implemented in get,
# but to avoid confusion implemented like this
if extra_headers is None:
extra_headers = {}
if data is None:
data = {}
# required because the youtube servers are strict on content type
# raises HTTPError [400]: Bad Request otherwise
extra_headers.update({"Content-Type": "application/json"})
response = _execute_request(
url,
headers=extra_headers,
data=data,
timeout=timeout
)
return response.read().decode("utf-8")
def seq_stream(
url,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
max_retries=0
):
"""Read the response in sequence.
:param str url: The URL to perform the GET request for.
:rtype: Iterable[bytes]
"""
# YouTube expects a request sequence number as part of the parameters.
split_url = parse.urlsplit(url)
base_url = '%s://%s/%s?' % (split_url.scheme, split_url.netloc, split_url.path)
querys = dict(parse.parse_qsl(split_url.query))
# The 0th sequential request provides the file headers, which tell us
# information about how the file is segmented.
querys['sq'] = 0
url = base_url + parse.urlencode(querys)
segment_data = b''
for chunk in stream(url, timeout=timeout, max_retries=max_retries):
yield chunk
segment_data += chunk
# We can then parse the header to find the number of segments
stream_info = segment_data.split(b'\r\n')
segment_count_pattern = re.compile(b'Segment-Count: (\\d+)')
for line in stream_info:
match = segment_count_pattern.search(line)
if match:
segment_count = int(match.group(1).decode('utf-8'))
# We request these segments sequentially to build the file.
seq_num = 1
while seq_num <= segment_count:
# Create sequential request URL
querys['sq'] = seq_num
url = base_url + parse.urlencode(querys)
yield from stream(url, timeout=timeout, max_retries=max_retries)
seq_num += 1
return # pylint: disable=R1711
def stream(
url,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
max_retries=0
):
"""Read the response in chunks.
:param str url: The URL to perform the GET request for.
:rtype: Iterable[bytes]
"""
file_size: int = default_range_size # fake filesize to start
downloaded = 0
while downloaded < file_size:
stop_pos = min(downloaded + default_range_size, file_size) - 1
range_header = f"bytes={downloaded}-{stop_pos}"
tries = 0
# Attempt to make the request multiple times as necessary.
while True:
# If the max retries is exceeded, raise an exception
if tries >= 1 + max_retries:
raise MaxRetriesExceeded()
# Try to execute the request, ignoring socket timeouts
try:
response = _execute_request(
url,
method="GET",
headers={"Range": range_header},
timeout=timeout
)
except URLError as e:
# We only want to skip over timeout errors, and
# raise any other URLError exceptions
if isinstance(e.reason, socket.timeout):
pass
else:
raise
except http.client.IncompleteRead:
# Allow retries on IncompleteRead errors for unreliable connections
pass
else:
# On a successful request, break from loop
break
tries += 1
if file_size == default_range_size:
try:
content_range = response.info()["Content-Range"]
file_size = int(content_range.split("/")[1])
except (KeyError, IndexError, ValueError) as e:
logger.error(e)
while True:
chunk = response.read()
if not chunk:
break
downloaded += len(chunk)
yield chunk
return # pylint: disable=R1711
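# Illustrative usage (a sketch only; the URL is a placeholder for a real stream URL):
#   with open("video.mp4", "wb") as fh:
#       for chunk in stream("https://example.com/videoplayback?...", max_retries=2):
#           fh.write(chunk)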
@lru_cache()
def filesize(url):
"""Fetch size in bytes of file at given URL
:param str url: The URL to get the size of
:returns: int: size in bytes of remote file
"""
return int(head(url)["content-length"])
@lru_cache()
def seq_filesize(url):
"""Fetch size in bytes of file at given URL from sequential requests
:param str url: The URL to get the size of
:returns: int: size in bytes of remote file
"""
total_filesize = 0
# YouTube expects a request sequence number as part of the parameters.
split_url = parse.urlsplit(url)
base_url = '%s://%s/%s?' % (split_url.scheme, split_url.netloc, split_url.path)
querys = dict(parse.parse_qsl(split_url.query))
# The 0th sequential request provides the file headers, which tell us
# information about how the file is segmented.
querys['sq'] = 0
url = base_url + parse.urlencode(querys)
response = _execute_request(
url, method="GET"
)
response_value = response.read()
# The file header must be added to the total filesize
total_filesize += len(response_value)
# We can then parse the header to find the number of segments
segment_count = 0
stream_info = response_value.split(b'\r\n')
segment_regex = b'Segment-Count: (\\d+)'
for line in stream_info:
# One of the lines should contain the segment count, but we don't know
# which, so we need to iterate through the lines to find it
try:
segment_count = int(regex_search(segment_regex, line, 1))
except RegexMatchError:
pass
if segment_count == 0:
raise RegexMatchError('seq_filesize', segment_regex)
# We make HEAD requests to the segments sequentially to find the total filesize.
seq_num = 1
while seq_num <= segment_count:
# Create sequential request URL
querys['sq'] = seq_num
url = base_url + parse.urlencode(querys)
total_filesize += int(head(url)['content-length'])
seq_num += 1
return total_filesize
def head(url):
"""Fetch headers returned http GET request.
:param str url:
The URL to perform the GET request for.
:rtype: dict
:returns:
dictionary of lowercase headers
"""
response_headers = _execute_request(url, method="HEAD").info()
return {k.lower(): v for k, v in response_headers.items()}
|
unlicense
|
7717abb37be2a302ac4afc9880d28652
| 31.120755
| 85
| 0.620301
| 4.112077
| false
| false
| false
| false
|
mozilla-iam/cis
|
python-modules/cis_crypto/cis_crypto/operation.py
|
1
|
5491
|
import json
import logging
import os
import yaml
from jose import jwk
from jose import jws
from jose.exceptions import JWSError
from cis_crypto import secret
from cis_crypto import common
logger = logging.getLogger(__name__)
# Note:
# These attrs on sign/verify could be refactored to use object inheritance. Leaving as is for now for readability.
class Sign(object):
def __init__(self):
self.config = common.get_config()
self.key_name = self.config("signing_key_name", namespace="cis", default="file")
self._jwk = None
self.secret_manager = self.config("secret_manager", namespace="cis", default="file")
self.payload = None
def load(self, data):
"""Loads a payload to the object and ensures that the thing is serializable."""
try:
data = yaml.safe_load(data)
except yaml.scanner.ScannerError:
logger.debug("This file is likely not YAML. Attempting JSON load.")
except AttributeError:
logger.debug("This file is likely not YAML. Attempting JSON load.")
if isinstance(data, str):
data = json.loads(data)
else:
pass
self.payload = data
return self.payload
def jws(self, keyname=None):
"""Assumes you loaded a payload. Returns a jws."""
# Override key name
if keyname is not None:
self.key_name = keyname
key_jwk = self._get_key()
sig = jws.sign(self.payload, key_jwk.to_dict(), algorithm="RS256")
return sig
def _get_key(self):
if self._jwk is None:
manager = secret.Manager(provider_type=self.secret_manager)
self._jwk = manager.get_key(key_name=self.key_name)
return self._jwk
class Verify(object):
def __init__(self):
self.config = common.get_config()
# Provide file or URL as opts.
self.well_known_mode = self.config("well_known_mode", namespace="cis", default="file")
self.public_key_name = None # Optional for use with file based well known mode
self.jws_signature = None
self.well_known = None # Well known JSON data
def load(self, jws_signature):
"""Takes data in the form of a dict() and a JWS sig."""
# Store the original form in the jws_signature attribute
self.jws_signature = jws_signature
def _get_public_key(self, keyname=None):
"""Returns a jwk construct for the public key and mode specified."""
if self.well_known_mode == "file":
key_dir = self.config(
"secret_manager_file_path",
namespace="cis",
default=("{}/.mozilla-iam/keys/".format(os.path.expanduser("~"))),
)
key_name = self.config("public_key_name", namespace="cis", default="access-file-key")
file_name = "{}".format(key_name)
            with open(os.path.join(key_dir, file_name), "rb") as fh:
                key_content = fh.read()
key_construct = jwk.construct(key_content, "RS256")
return [key_construct.to_dict()]
elif self.well_known_mode == "http" or self.well_known_mode == "https":
logger.debug("Well known mode engaged. Reducing key structure.", extra={"well_known": self.well_known})
return self._reduce_keys(keyname)
def _reduce_keys(self, keyname):
access_file_keys = self.well_known["access_file"]["jwks"]["keys"]
publishers_supported = self.well_known["api"]["publishers_jwks"]
keys = []
if "access-file-key" in self.config("public_key_name", namespace="cis"):
logger.debug("This is an access file verification.")
return access_file_keys
else:
# If not an access key verification this will attempt to verify against any listed publisher.
keys = publishers_supported[keyname]["keys"]
logger.debug("Publisher based verification, will use {} public keys for verification.".format(keys))
return keys
def jws(self, keyname=None):
"""Assumes you loaded a payload. Return the same jws or raise a custom exception."""
key_material = self._get_public_key(keyname)
logger.debug(
"The key material for the payload was loaded for: {}".format(keyname), extra={"key_material": key_material}
)
if isinstance(key_material, list):
logger.debug("Multiple keys returned. Attempting match.")
for key in key_material:
try:
key.pop("x5t", None)
key.pop("x5c", None)
except AttributeError:
                    logger.warning("x5t and x5c attrs do not exist in key material.")
logger.debug("Attempting to match against: {}".format(key))
try:
sig = jws.verify(self.jws_signature, key, algorithms="RS256", verify=True)
logger.debug(
"Matched a verified signature for: {}".format(key), extra={"signature": self.jws_signature}
)
return sig
except JWSError as e:
logger.error(
"The signature was not valid for the payload.", extra={"signature": self.jws_signature}
)
logger.error(e)
raise JWSError("The signature could not be verified for any trusted key", key_material)
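# Illustrative round trip (a sketch only; it assumes signing/verification keys have
# already been provisioned for the configured secret manager and well-known mode):
#   signer = Sign()
#   signer.load('{"example": "payload"}')
#   signature = signer.jws()
#   verifier = Verify()
#   verifier.load(signature)
#   verified_payload = verifier.jws()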
|
mpl-2.0
|
41f5b14b1044c337dd5dde3addfa3cd4
| 40.285714
| 119
| 0.590056
| 4.082528
| false
| true
| false
| false
|
mozilla-iam/cis
|
python-modules/cis_logger/cis_logger/__init__.py
|
1
|
1579
|
import logging.handlers
from pythonjsonlogger import jsonlogger
import datetime
class JsonFormatter(jsonlogger.JsonFormatter, object):
def __init__(
self,
fmt="%(asctime) %(name) %(processName) %(filename) \
%(funcName) %(levelname) %(lineno) %(module) %(threadName) %(message)",
datefmt="%Y-%m-%dT%H:%M:%SZ%z",
style="%",
extra={},
*args,
**kwargs
):
self._extra = extra
jsonlogger.JsonFormatter.__init__(self, fmt=fmt, datefmt=datefmt, *args, **kwargs)
def process_log_record(self, log_record):
if "asctime" in log_record:
log_record["timestamp"] = log_record["asctime"]
else:
log_record["timestamp"] = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ%z")
if self._extra is not None:
for key, value in self._extra.items():
log_record[key] = value
return super(JsonFormatter, self).process_log_record(log_record)
class SysLogJsonHandler(logging.handlers.SysLogHandler, object):
def __init__(
self,
address=("localhost", logging.handlers.SYSLOG_UDP_PORT),
facility=logging.handlers.SysLogHandler.LOG_USER,
socktype=None,
prefix="",
):
super(SysLogJsonHandler, self).__init__(address, facility, socktype)
self._prefix = prefix
if self._prefix != "":
self._prefix = prefix + ": "
def format(self, record):
return self._prefix + super(SysLogJsonHandler, self).format(record)
|
mpl-2.0
|
d294b79b643857c42fe48606e33c9387
| 33.326087
| 118
| 0.59658
| 3.879607
| false
| false
| false
| false
|
ibm-watson-iot/iot-python
|
src/wiotp/sdk/api/registry/devices.py
|
2
|
15894
|
# *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import iso8601
from datetime import datetime
import json
from collections import defaultdict
from wiotp.sdk.api.common import IterableList
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.registry.diag import DeviceLogs, DeviceErrorCodes
class LogEntry(defaultdict):
def __init__(self, **kwargs):
if not set(["message", "timestamp"]).issubset(kwargs):
raise Exception("message and timestamp are required properties for a LogEntry")
kwargs["timestamp"] = iso8601.parse_date(kwargs["timestamp"])
dict.__init__(self, **kwargs)
@property
def message(self):
return self["message"]
@property
def timestamp(self):
return self["timestamp"]
class DeviceUid(defaultdict):
def __init__(self, **kwargs):
if not set(["deviceId", "typeId"]).issubset(kwargs):
raise Exception("typeId and deviceId are required properties to uniquely identify a device")
dict.__init__(self, **kwargs)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
def __str__(self):
return self["typeId"] + ":" + self["deviceId"]
def __repr__(self):
return json.dumps(self, sort_keys=True, indent=2)
class DeviceCreateRequest(defaultdict):
def __init__(self, typeId, deviceId, authToken=None, deviceInfo=None, location=None, metadata=None):
dict.__init__(
self,
typeId=typeId,
deviceId=deviceId,
authToken=authToken,
deviceInfo=deviceInfo,
location=location,
metadata=metadata,
)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def authToken(self):
return self["authToken"]
@property
def deviceInfo(self):
return DeviceInfo(**self["deviceInfo"])
@property
def location(self):
return self["location"]
@property
def metadata(self):
return self["metadata"]
class DeviceLocation(defaultdict):
def __init__(self, **kwargs):
if not set(["latitude", "longitude"]).issubset(kwargs):
raise Exception("Data passed to Device is not correct: %s" % (json.dumps(kwargs, sort_keys=True)))
if "measuredDateTime" in kwargs and not isinstance(kwargs["measuredDateTime"], datetime):
kwargs["measuredDateTime"] = iso8601.parse_date(kwargs["measuredDateTime"])
if "updatedDateTime" in kwargs and not isinstance(kwargs["updatedDateTime"], datetime):
kwargs["updatedDateTime"] = iso8601.parse_date(kwargs["updatedDateTime"])
dict.__init__(self, **kwargs)
@property
def latitude(self):
return self["latitude"]
@property
def longitude(self):
return self["longitude"]
@property
def measuredDateTime(self):
return self.get("measuredDateTime", None)
@property
def updatedDateTime(self):
return self.get("updatedDateTime", None)
class DeviceCreateResponse(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def success(self):
return self.get("success", None)
@property
def authToken(self):
return self["authToken"]
class DeviceInfo(defaultdict):
def __init__(
self,
description=None,
deviceClass=None,
fwVersion=None,
hwVersion=None,
manufacturer=None,
model=None,
serialNumber=None,
descriptiveLocation=None,
):
dict.__init__(
self,
description=description,
deviceClass=deviceClass,
fwVersion=fwVersion,
hwVersion=hwVersion,
manufacturer=manufacturer,
model=model,
serialNumber=serialNumber,
descriptiveLocation=descriptiveLocation,
)
@property
def description(self):
return self["description"]
@property
def deviceClass(self):
return self["deviceClass"]
@property
def fwVersion(self):
return self["fwVersion"]
@property
def hwVersion(self):
return self["hwVersion"]
@property
def manufacturer(self):
return self["manufacturer"]
@property
def model(self):
return self["model"]
@property
def serialNumber(self):
return self["serialNumber"]
@property
def descriptiveLocation(self):
return self["descriptiveLocation"]
class Device(defaultdict):
def __init__(self, apiClient, **kwargs):
self._apiClient = apiClient
if not set(["clientId", "deviceId", "typeId"]).issubset(kwargs):
raise Exception("Data passed to Device is not correct: %s" % (json.dumps(kwargs, sort_keys=True)))
self.diagLogs = DeviceLogs(self._apiClient, kwargs["typeId"], kwargs["deviceId"])
self.diagErrorCodes = DeviceErrorCodes(self._apiClient, kwargs["typeId"], kwargs["deviceId"])
dict.__init__(self, **kwargs)
# {u'clientId': u'xxxxxxxxx',
# u'deviceId': u'xxxxxxx',
# u'deviceInfo': {u'description': u'None (xxxxxxxx)',
# u'deviceClass': u'None',
# u'fwVersion': u'xxxxx',
# u'hwVersion': u'xxxxx',
# u'manufacturer': u'xxxx.',
# u'model': u'xxxx',
# u'serialNumber': u'xxxxxxxxx'},
# u'metadata': {},
# u'refs': {u'diag': {u'errorCodes': u'/api/v0002/device/types/xxx/devices/xxxx/diag/errorCodes',
# u'logs': u'/api/v0002/device/types/xxx/devices/xxxx/diag/logs'},
# u'location': u'/api/v0002/device/types/xxxx/devices/xxxx/location',
# u'mgmt': u'/api/v0002/device/types/xx/devices/xxxx/mgmt'},
# u'registration': {u'auth': {u'id': u'xxxxxx',
# u'type': u'person'},
# u'date': u'2015-09-18T06:44:02.000Z'},
# u'status': {u'alert': {u'enabled': False,
# u'timestamp': u'2016-01-21T02:25:55.543Z'}},
# u'typeId': u'vm'}
@property
def clientId(self):
return self["clientId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def authToken(self):
if "authToken" in self:
return self["authToken"]
else:
return None
@property
def metadata(self):
if "metadata" in self:
return self["metadata"]
else:
return None
@property
def total_rows(self):
return self["total_rows"]
@property
def deviceInfo(self):
# Unpack the deviceInfo dictionary into keyword arguments so that we
# can return a DeviceInfo object instead of a plain dictionary
return DeviceInfo(**self["deviceInfo"])
@property
def typeId(self):
return self["typeId"]
def __str__(self):
return "[%s] %s" % (self.clientId, self.deviceInfo.description or "<No description>")
def __repr__(self):
return json.dumps(self, sort_keys=True, indent=2)
def json(self):
return dict(self)
# Extended properties
def getMgmt(self):
r = self._apiClient.get("api/v0002/device/types/%s/devices/%s/mgmt" % (self.typeId, self.deviceId))
if r.status_code == 200:
return r.json()
if r.status_code == 404:
            # It's perfectly valid for a device to not have any management information; if this is the case, return None
return None
else:
raise ApiException(r)
def getLocation(self):
r = self._apiClient.get("api/v0002/device/types/%s/devices/%s/location" % (self.typeId, self.deviceId))
if r.status_code == 200:
return DeviceLocation(**r.json())
if r.status_code == 404:
# It's perfectly valid for a device to not have a location set, if this is the case, set response to None
return None
else:
raise ApiException(r)
def setLocation(self, value):
r = self._apiClient.put("api/v0002/device/types/%s/devices/%s/location" % (self.typeId, self.deviceId), value)
if r.status_code == 200:
return DeviceLocation(**r.json())
else:
raise ApiException(r)
def getConnectionLogs(self):
r = self._apiClient.get(
"api/v0002/logs/connection", parameters={"typeId": self.typeId, "deviceId": self.deviceId}
)
if r.status_code == 200:
responseList = []
for entry in r.json():
responseList.append(LogEntry(**entry))
return responseList
else:
raise ApiException(r)
class IterableDeviceList(IterableList):
def __init__(self, apiClient, typeId=None):
if typeId is None:
super(IterableDeviceList, self).__init__(apiClient, Device, "api/v0002/bulk/devices", "typeId,deviceId")
else:
super(IterableDeviceList, self).__init__(
apiClient, Device, "api/v0002/device/types/%s/devices/" % (typeId), "deviceId"
)
class Devices(defaultdict):
"""
    Use the global unique identifier of a device, its `clientId`, to address devices.
    # Delete
    ```python
    del devices["d:orgId:typeId:deviceId"]
    ```
    # Get
    Use the global unique identifier of a device, its `clientId`.
    ```python
    device = devices["d:orgId:typeId:deviceId"]
    print(device.clientId)
    print(device)
    ```
    # Is a device registered?
    ```python
    if "d:orgId:typeId:deviceId" in devices:
        print("The device exists")
    ```
# Iterate through all registered devices
```python
for device in devices:
print(device)
```
"""
# https://docs.python.org/2/library/collections.html#defaultdict-objects
def __init__(self, apiClient, typeId=None):
self._apiClient = apiClient
self.typeId = typeId
def __contains__(self, key):
"""
Does a device exist?
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.get(deviceUrl)
if r.status_code == 200:
return True
elif r.status_code == 404:
return False
else:
raise ApiException(r)
def __getitem__(self, key):
"""
Get a device from the registry
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.get(deviceUrl)
if r.status_code == 200:
return Device(apiClient=self._apiClient, **r.json())
elif r.status_code == 404:
self.__missing__(key)
else:
raise ApiException(r)
def __setitem__(self, key, value):
"""
Register a new device - not currently supported via this interface, use: `registry.devices.create()`
"""
raise Exception("Unable to register or update a device via this interface at the moment.")
def __delitem__(self, key):
"""
Delete a device
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.delete(deviceUrl)
if r.status_code == 404:
self.__missing__(key)
elif r.status_code != 204:
raise ApiException(r)
def __missing__(self, key):
"""
Device does not exist
"""
raise KeyError("Device %s does not exist" % (key))
def __iter__(self, *args, **kwargs):
"""
Iterate through all devices
"""
return IterableDeviceList(self._apiClient, self.typeId)
@property
def total_rows(self):
"""
Returns total devices
"""
return self["total_rows"]
def create(self, devices):
"""
Register one or more new devices, each request can contain a maximum of 512KB.
The response body will contain the generated authentication tokens for all devices.
You must make sure to record these tokens when processing the response.
        We are not able to retrieve lost authentication tokens.
        It accepts a list of devices (a list of device dictionaries) or a single device.
        If you provide a list as the parameter, it will return a list in response.
        If you provide a single device, it will return a single response.
"""
if not isinstance(devices, list):
listOfDevices = [devices]
returnAsAList = False
else:
listOfDevices = devices
returnAsAList = True
r = self._apiClient.post("api/v0002/bulk/devices/add", listOfDevices)
if r.status_code in [201, 202]:
if returnAsAList:
responseList = []
for entry in r.json():
responseList.append(DeviceCreateResponse(**entry))
return responseList
else:
return DeviceCreateResponse(**r.json()[0])
else:
raise ApiException(r)
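    # Illustrative call (hypothetical type/device IDs), based on the behaviour described
    # in the docstring above:
    #   response = registry.devices.create({"typeId": "myType", "deviceId": "device001"})
    #   print(response.authToken)
    # Passing a list of such dictionaries returns a list of DeviceCreateResponse objects.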
def update(self, deviceUid, metadata=None, deviceInfo=None, status=None):
"""
Update an existing device
"""
if not isinstance(deviceUid, DeviceUid) and isinstance(deviceUid, dict):
deviceUid = DeviceUid(**deviceUid)
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (deviceUid.typeId, deviceUid.deviceId)
data = {"status": status, "deviceInfo": deviceInfo, "metadata": metadata}
r = self._apiClient.put(deviceUrl, data)
if r.status_code == 200:
return Device(apiClient=self._apiClient, **r.json())
else:
raise ApiException(r)
def delete(self, devices):
"""
        Delete one or more devices, each request can contain a maximum of 512KB.
        It accepts a list of devices (a list of device dictionaries).
        In case of failure it throws an ApiException.
"""
if not isinstance(devices, list):
listOfDevices = [devices]
else:
listOfDevices = devices
r = self._apiClient.post("api/v0002/bulk/devices/remove", listOfDevices)
if r.status_code in [200, 202]:
return r.json()
else:
raise ApiException(r)
|
epl-1.0
|
2ee6e0ceefe91a835b0e38b3a5447d3d
| 29.624277
| 118
| 0.577702
| 4.125097
| false
| false
| false
| false
|
ibm-watson-iot/iot-python
|
src/wiotp/sdk/api/dsc/destinations.py
|
2
|
4384
|
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from collections import defaultdict
import iso8601
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.common import IterableList, RestApiDict
# See docs @ https://orgid.internetofthings.ibmcloud.com/docs/v0002/historian-connector.html
class Destination(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def name(self):
        # Unlike most other resources, the name is the UUID; there is no separate id property
return self["name"]
@property
def destinationType(self):
return self["type"]
@property
def configuration(self):
return self["configuration"]
# EventStreams only configuration
@property
def partitions(self):
if self["type"] == "eventstreams":
return self["configuration"]["partitions"]
else:
return None
# Cloudant only configuration
@property
def bucketInterval(self):
if self["type"] == "cloudant":
return self["configuration"]["bucketInterval"]
else:
return None
# Cloudant only configuration
@property
def retentionDays(self):
# this is an optional parameter so check if it exists
if "configuration" in self and "retentionDays" in self["configuration"]:
return self["configuration"]["retentionDays"]
else:
return None
# DB2/Postgres only configuration
@property
def columns(self):
# this is an optional parameter so check if it exists
if "configuration" in self and "columns" in self["configuration"]:
return self["configuration"]["columns"]
else:
return None
class IterableDestinationList(IterableList):
def __init__(self, apiClient, url, filters=None):
# This API does not support sorting
super(IterableDestinationList, self).__init__(
apiClient, Destination, url, sort=None, filters=filters, passApiClient=False
)
class Destinations(RestApiDict):
def __init__(self, apiClient, connectorId, connectorType):
super(Destinations, self).__init__(
apiClient,
Destination,
IterableDestinationList,
"api/v0002/historianconnectors/%s/destinations" % connectorId,
)
self.connectorId = connectorId
self.connectorType = connectorType
self.allDestinationsUrl = "api/v0002/historianconnectors/%s/destinations" % connectorId
def find(self, nameFilter=None):
queryParms = {}
if nameFilter:
queryParms["name"] = nameFilter
return IterableDestinationList(self._apiClient, self.allDestinationsUrl, filters=queryParms)
def create(self, name, **kwargs):
if self.connectorType == "cloudant":
if "bucketInterval" not in kwargs.keys():
raise Exception("You must specify bucketInterval parameter on create for a Cloudant destination")
if self.connectorType == "eventstreams":
if "partitions" not in kwargs.keys():
raise Exception("You must specify partitions parameter on create for an EventStreams destination")
if self.connectorType == "db2" or self.connectorType == "postgres":
if "columns" not in kwargs.keys():
raise Exception("You must specify a columns parameter on create for a DB2 or Postgres destination")
destination = {"name": name, "type": self.connectorType, "configuration": kwargs}
r = self._apiClient.post(self.allDestinationsUrl, data=destination)
if r.status_code == 201:
return Destination(**r.json())
else:
raise ApiException(r)
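    # Illustrative calls (hypothetical names and configuration values), matching the
    # per-connector requirements enforced above:
    #   destinations.create(name="alerts", partitions=3)            # eventstreams connector
    #   destinations.create(name="archive", bucketInterval="MONTH") # cloudant connector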
def update(self, key, item):
"""
        Update a destination - not supported, the API does not allow updating destinations.
"""
raise Exception("The API doesn't support updating a destination.")
|
epl-1.0
|
2af7656e3d7285851117f5bc8a1cd1ad
| 35.231405
| 115
| 0.627053
| 4.629356
| false
| true
| false
| false
|
ibm-watson-iot/iot-python
|
src/wiotp/sdk/api/state/state.py
|
2
|
2752
|
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from collections import defaultdict
import iso8601
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.common import IterableList
from wiotp.sdk.api.common import RestApiDict
from wiotp.sdk.api.common import RestApiItemBase
from wiotp.sdk.api.common import RestApiDictReadOnly
# See docs @ https://orgid.internetofthings.ibmcloud.com/docs/v0002-beta/State-mgr-beta.html
class State(defaultdict):
def __init__(self, apiClient, url, **kwargs):
self._apiClient = apiClient
self._url = url
dict.__init__(self, **kwargs)
@property
def state(self):
return self["state"]
@property
def timestamp(self):
return iso8601.parse_date(self["timestamp"])
@property
def updated(self):
return iso8601.parse_date(self["updated"])
def __callPatchOperation__(self, body):
r = self._apiClient.patch(self._url, body)
if r.status_code == 200:
return r.json()
else:
raise Exception("Unexpected response from API (%s) = %s %s" % (self._url, r.status_code, r.text))
def reset(self):
return self.__callPatchOperation__({"operation": "reset-state"})
class States(RestApiDictReadOnly):
def __init__(self, apiClient, typeId, instanceId):
url = "api/v0002/device/types/%s/devices/%s/state" % (typeId, instanceId)
super(States, self).__init__(apiClient, State, None, url)
# TBD this method overrides the base class method to pass the state URL to the constructed state
    # without this, we can't invoke the reset-state API call.
def __getitem__(self, key):
url = self._singleItemUrl % (key)
r = self._apiClient.get(url)
if r.status_code == 200:
return self._castToClass(apiClient=self._apiClient, url=url, **r.json())
if r.status_code == 404:
self.__missing__(key)
else:
raise ApiException(r)
    # override the standard iterator, as there is no API to get all state by iterating over LIs
def __iter__(self, *args, **kwargs):
raise Exception("Unable to iterate through device state. Retrieve it for a specific LI.")
def find(self, query_params={}):
raise Exception("Unable to find device state. Retrieve it for a specific LI.")
|
epl-1.0
|
58605d9eeb71bac265a21eb036021baf
| 36.69863
| 109
| 0.628634
| 3.97114
| false
| false
| false
| false
|
ibm-watson-iot/iot-python
|
src/wiotp/sdk/api/usage/__init__.py
|
2
|
2241
|
# *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from datetime import datetime
from collections import defaultdict
from wiotp.sdk.exceptions import ApiException
class DataTransferSummary(defaultdict):
def __init__(self, **kwargs):
daysAsObj = []
if "days" in kwargs and kwargs["days"] is not None:
for day in kwargs["days"]:
daysAsObj.append(DayDataTransfer(**day))
del kwargs["days"]
dict.__init__(self, days=daysAsObj, **kwargs)
@property
def start(self):
return datetime.strptime(self["start"], "%Y-%m-%d").date()
@property
def end(self):
return datetime.strptime(self["end"], "%Y-%m-%d").date()
@property
def average(self):
return self["average"]
@property
def total(self):
return self["total"]
@property
def days(self):
return self["days"]
class DayDataTransfer(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def date(self):
return datetime.strptime(self["date"], "%Y-%m-%d").date()
@property
def total(self):
return self["total"]
class Usage:
def __init__(self, apiClient):
self._apiClient = apiClient
def dataTransfer(self, start, end, detail=False):
"""
        Retrieve a summary of the organization's data transfer usage between the start and end dates.
In case of failure it throws APIException
"""
r = self._apiClient.get(
"api/v0002/usage/data-traffic?start=%s&end=%s&detail=%s"
% (start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d"), detail)
)
if r.status_code == 200:
return DataTransferSummary(**r.json())
else:
raise ApiException(r)
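# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Assumes an authenticated ApiClient; start and end are ordinary date objects.
def _example_data_transfer(apiClient):  # pragma: no cover
    from datetime import date
    usage = Usage(apiClient)
    summary = usage.dataTransfer(date(2019, 1, 1), date(2019, 1, 31), detail=True)
    print(summary.start, summary.end, summary.total, summary.average)
    for day in summary.days:
        print(day.date, day.total)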
|
epl-1.0
|
a70bd166a2e5bef336742ee9905bf3e0
| 28.486842
| 113
| 0.57162
| 4.18097
| false
| false
| false
| false
|
ibm-watson-iot/iot-python
|
test/test_device_mgd.py
|
2
|
11230
|
# *****************************************************************************
# Copyright (c) 2016,2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import pytest
import testUtils
import uuid
import os
import wiotp.sdk
import time
from wiotp.sdk.device import ManagedDeviceClient
from wiotp.sdk import Utf8Codec
class TestDeviceMgd(testUtils.AbstractTest):
def testManagedDeviceQSException(self):
with pytest.raises(wiotp.sdk.ConfigurationException) as e:
options = {"identity": {"orgId": "quickstart", "typeId": "xxx", "deviceId": "xxx"}}
wiotp.sdk.device.ManagedDeviceClient(options)
assert "QuickStart does not support device management" == e.value.reason
def testManagedDeviceConnectException(self, device):
badOptions = {
"identity": {"orgId": self.ORG_ID, "typeId": device.typeId, "deviceId": device.deviceId},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
deviceInfoObj = wiotp.sdk.device.DeviceInfo()
managedDevice = wiotp.sdk.device.ManagedDeviceClient(badOptions, deviceInfo=deviceInfoObj)
assert isinstance(managedDevice, wiotp.sdk.device.ManagedDeviceClient)
with pytest.raises(wiotp.sdk.ConnectionException) as e:
managedDevice.connect()
assert managedDevice.isConnected() == False
def testManagedDeviceConnect(self, device):
badOptions = {
"identity": {"orgId": self.ORG_ID, "typeId": device.typeId, "deviceId": device.deviceId},
"auth": {"token": device.authToken},
}
deviceInfoObj = wiotp.sdk.device.DeviceInfo()
managedDevice = wiotp.sdk.device.ManagedDeviceClient(badOptions, deviceInfo=deviceInfoObj)
assert isinstance(managedDevice, wiotp.sdk.device.ManagedDeviceClient)
managedDevice.connect()
assert managedDevice.isConnected() == True
managedDevice.disconnect()
assert managedDevice.isConnected() == False
def testManagedDeviceSetPropertyNameNone(self):
with pytest.raises(Exception) as e:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
managedDeviceClientValue.setProperty(value=1)
assert "Unsupported property name: " in str(e.value)
def testManagedDeviceSetPropertyValue(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
testName = "model"
testValue = 2
test = managedDeviceClientValue.setProperty(name=testName, value=testValue)
assert managedDeviceClientValue._deviceInfo[testName] == testValue
except:
assert False == True
    # TODO: cover the rest of setProperty and notifyFieldChange (onSubscribe needs variables)
    # The commented-out code below hangs when run, although it would improve coverage;
    # look into this later.
# def testManagedDeviceManageOnSubscribe(self):
# try:
# config = {
# "identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
# "auth": {"token": "xxxxxxxxxxxxxxxxxx"},
# }
# managedDeviceClientValue = ManagedDeviceClient(config)
# test = managedDeviceClientValue._onSubscribe(mqttc=1, userdata=2, mid=3, granted_qos=4)
# assert True
# except:
# assert False == True
def testManagedDeviceManageLifetimeValueZero(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.manage(lifetime=3000)
assert True
except:
assert False == True
def testManagedDeviceUnManage(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.unmanage()
assert True
except:
assert False == True
def testManagedDeviceSetLocationLongitude(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2)
assert managedDeviceClientValue._location["longitude"] == 1
except:
assert False == True
def testManagedDeviceSetLocationLatitude(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2)
assert managedDeviceClientValue._location["latitude"] == 2
except:
assert False == True
def testManagedDeviceSetLocationElevation(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2, elevation=3)
assert managedDeviceClientValue._location["elevation"] == 3
except:
assert False == True
def testManagedDeviceSetLocationAccuracy(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2, elevation=3, accuracy=4)
assert managedDeviceClientValue._location["accuracy"] == 4
except:
assert False == True
def testManagedDeviceSetErrorCodeNone(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setErrorCode(errorCode=None)
assert managedDeviceClientValue._errorCode == 0
except:
assert False == True
def testManagedDeviceSetErrorCode(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setErrorCode(errorCode=15)
assert True
except:
assert False == True
def testManagedDeviceClearErrorCodes(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.clearErrorCodes()
assert managedDeviceClientValue._errorCode == None
except:
assert False == True
def testManagedDeviceAddLog(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.addLog(msg="h", data="e")
assert True
except:
assert False == True
def testManagedDeviceClearLog(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.clearLog()
assert True
except:
assert False == True
def testManagedDeviceRespondDeviceAction(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.respondDeviceAction(reqId=1)
assert True
except:
assert False == True
# Do line 337 - 571
def testManagedDeviceSetState(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setState(status=1)
assert True
except:
assert False == True
def testManagedDeviceSetUpdateStatus(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setUpdateStatus(status=1)
except:
assert False == True
    # Use this template for the rest of the __ (name-mangled) functions
def testManagedDeviceMgmtResponseError(self):
with pytest.raises(Exception) as e:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDevice = ManagedDeviceClient(config)
testValue = "Test"
encodedPayload = Utf8Codec.encode(testValue)
managedDevice._ManagedDeviceClient__onDeviceMgmtResponse(client=1, userdata=2, pahoMessage=encodedPayload)
assert "Unable to parse JSON. payload=" " error" in str(e.value)
|
epl-1.0
|
88cc7945d570b3766356e9f5cfe113fe
| 40.439114
| 118
| 0.574087
| 4.424744
| false
| true
| false
| false
|
mbj4668/pyang
|
pyang/repository.py
|
1
|
5853
|
"""A repository for searching and holding loaded pyang modules"""
import os
import sys
import io
from . import util
from . import syntax
class Repository(object):
"""Abstract base class that represents a module repository"""
    def get_modules_and_revisions(self, ctx):
        """Return a list of all modules and their revisions
Returns a tuple (`modulename`, `revision`, `handle`), where
`handle' is used in the call to get_module_from_handle() to
retrieve the module.
"""
def get_module_from_handle(self, handle):
"""Return the raw module text from the repository
Returns (`ref`, `in_format`, `text`) if found, or None if not found.
`ref` is a string which is used to identify the source of
        the text for the user; it is used in error messages.
`in_format` is one of 'yang' or 'yin' or None.
`text` is the raw text data
Raises `ReadError`
"""
    class ReadError(Exception):
        """Signals that an error occurred during module retrieval"""
class FileRepository(Repository):
def __init__(self, path="", use_env=True, no_path_recurse=False,
verbose=False):
"""Create a Repository which searches the filesystem for modules
`path` is a `os.pathsep`-separated string of directories
"""
Repository.__init__(self)
self.dirs = []
self.no_path_recurse = no_path_recurse
self.modules = None
self.verbose = verbose
for directory in path.split(os.pathsep):
self._add_directory(directory)
while use_env:
use_env = False
modpath = os.getenv('YANG_MODPATH')
if modpath is not None:
for directory in modpath.split(os.pathsep):
self._add_directory(directory)
home = os.getenv('HOME')
if home is not None:
self._add_directory(os.path.join(home, 'yang', 'modules'))
inst = os.getenv('YANG_INSTALL')
if inst is not None:
self._add_directory(os.path.join(inst, 'yang', 'modules'))
break # skip search if install location is indicated
default_install = os.path.join(
sys.prefix, 'share', 'yang', 'modules')
if os.path.exists(default_install):
self._add_directory(default_install)
break # end search if default location exists
# for some systems, sys.prefix returns `/usr`
# but the real location is `/usr/local`
# if the package is installed with pip
# this information can be easily retrieved
import pkgutil
if not pkgutil.find_loader('pip'):
break # abort search if pip is not installed
# hack below to handle pip 10 internals
# if someone knows pip and how to fix this, it would be great!
location = None
try:
import pip.locations as locations
location = locations.distutils_scheme('pyang')
except:
try:
import pip._internal.locations as locations
location = locations.distutils_scheme('pyang')
except:
pass
if location is not None:
self._add_directory(
os.path.join(location['data'], 'share', 'yang', 'modules'))
if verbose:
sys.stderr.write('# module search path: %s\n'
% os.pathsep.join(self.dirs))
def _add_directory(self, directory):
if (not directory
or directory in self.dirs
or not os.path.isdir(directory)):
return False
self.dirs.append(directory)
return True
def _setup(self, ctx):
# check all dirs for yang and yin files
self.modules = []
def add_files_from_dir(d):
try:
files = os.listdir(d)
except OSError:
files = []
for fname in files:
absfilename = os.path.join(d, fname)
if os.path.isfile(absfilename):
m = syntax.re_filename.search(fname)
if m is not None:
name, rev, in_format = m.groups()
if not os.access(absfilename, os.R_OK):
continue
if absfilename.startswith("./"):
absfilename = absfilename[2:]
handle = in_format, absfilename
self.modules.append((name, rev, handle))
elif (not self.no_path_recurse
and d != '.' and os.path.isdir(absfilename)):
add_files_from_dir(absfilename)
for d in self.dirs:
add_files_from_dir(d)
def get_modules_and_revisions(self, ctx):
if self.modules is None:
self._setup(ctx)
return self.modules
def get_module_from_handle(self, handle):
in_format, absfilename = handle
fd = None
try:
fd = io.open(absfilename, "r", encoding="utf-8")
text = fd.read()
if self.verbose:
util.report_file_read(absfilename)
except IOError as ex:
raise self.ReadError("%s: %s" % (absfilename, ex))
except UnicodeDecodeError as ex:
s = str(ex).replace('utf-8', 'utf8')
raise self.ReadError("%s: unicode error: %s" % (absfilename, s))
finally:
if fd is not None:
fd.close()
if in_format is None:
in_format = util.guess_format(text)
return absfilename, in_format, text
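# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Assumes a directory containing YANG modules; the path is a placeholder, and the
# ctx argument is not used by FileRepository when scanning directories.
def _example_list_modules():  # pragma: no cover
    repo = FileRepository("modules", use_env=False, verbose=True)
    for name, rev, handle in repo.get_modules_and_revisions(ctx=None):
        ref, in_format, text = repo.get_module_from_handle(handle)
        print(name, rev, in_format, len(text))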
|
isc
|
17e8f7f53c9b7c93601f109e9dc07617
| 35.12963
| 79
| 0.537502
| 4.491942
| false
| false
| false
| false
|
mbj4668/pyang
|
test/test_issues/test_i225/test_prefix_deviation.py
|
1
|
3107
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name
"""
tests for PYANG data files
"""
import os
import sys
# hack to handle pip 10 internals
try:
import pip.locations as locations
except ImportError:
import pip._internal.locations as locations
from pyang.context import Context
from pyang.repository import FileRepository
EXISTING_MODULE = 'ietf-yang-types'
DEFAULT_OPTIONS = {
'format': 'yang',
'verbose': True,
'list_errors': True,
'print_error_code': True,
'yang_remove_unused_imports': True,
'yang_canonical': True,
'trim_yin': False,
'keep_comments': True,
'features': [],
'deviations': [],
'path': []
}
"""Default options for pyang command line"""
class objectify(object):
"""Utility for providing object access syntax (.attr) to dicts"""
def __init__(self, *args, **kwargs):
for entry in args:
self.__dict__.update(entry)
self.__dict__.update(kwargs)
def __getattr__(self, _):
return None
def __setattr__(self, attr, value):
self.__dict__[attr] = value
def create_context(path='.', *options, **kwargs):
"""Generates a pyang context
Arguments:
path (str): location of YANG modules.
*options: list of dicts, with options to be passed to context.
**kwargs: similar to ``options`` but have a higher precedence.
Returns:
pyang.Context: Context object for ``pyang`` usage
"""
opts = objectify(DEFAULT_OPTIONS, *options, **kwargs)
repo = FileRepository(path, no_path_recurse=opts.no_path_recurse)
ctx = Context(repo)
ctx.opts = opts
return ctx
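# Hedged example (added for illustration; not part of the original tests): the
# kwargs passed to create_context() take precedence over the option dicts, so
# this context ends up verbose even though the dict disables it. The module
# path is a placeholder.
#
#   ctx = create_context('path/to/modules', {'verbose': False}, verbose=True)
#   assert ctx.opts.verbose is True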
def test_can_find_modules_with_pip_install():
"""
context should find the default installed modules even when pyang
is installed using pip
"""
# remove obfuscation from env vars
if os.environ.get('YANG_INSTALL'):
del os.environ['YANG_INSTALL']
if os.environ.get('YANG_MODPATH'):
del os.environ['YANG_MODPATH']
ctx = create_context()
module = ctx.search_module(None, EXISTING_MODULE)
assert module is not None
def test_can_find_modules_when_prefix_differ(monkeypatch):
"""
context should find the default installed modules, without the help
    of environment variables, even if the pip install location
differs from ``sys.prefix``
"""
# store pip location.
# monkeypatching sys.prefix will side_effect scheme.
try:
scheme = locations.distutils_scheme('pyang')
monkeypatch.setattr(
locations, 'distutils_scheme', lambda *_: scheme)
except:
print("cannot get scheme from pip, skipping")
return
# simulate #225 description
monkeypatch.setattr(sys, 'prefix', '/usr')
# remove obfuscation from env vars
if os.environ.get('YANG_INSTALL'):
del os.environ['YANG_INSTALL']
if os.environ.get('YANG_MODPATH'):
del os.environ['YANG_MODPATH']
ctx = create_context()
module = ctx.search_module(None, EXISTING_MODULE)
assert module is not None
|
isc
|
48b0babf28f81699b5b6a833358dc176
| 24.891667
| 71
| 0.641455
| 3.86924
| false
| false
| false
| false
|
mbj4668/pyang
|
pyang/plugins/omni.py
|
1
|
11901
|
import optparse
from pyang import plugin
paths_in_module = []
leafrefs = []
key = ''
class_keywords = ["container", "list", "case", "choice", "augment"]
servicepoints = ["servicepoint", "productpoint"]
classnamecolor = " {0.113725, 0.352941, 0.670588}"
mandatoryconfig = " {0.600000, 0.152941, 0.152941}"
optionalconfig = " {0.129412, 0.501961, 0.254902}"
notconfig = " {0.549020, 0.486275, 0.133333}"
# which line style to use for containment; OmniGraffle defaults to a bezier, so override it
containsline = " tail type: \"FilledDiamond\", head type: \"None\", line type: \"Straight\" "
leafrefline = " line type: \"Straight\", head type: \"FilledArrow\" "
def pyang_plugin_init():
plugin.register_plugin(OmniPlugin())
class OmniPlugin(plugin.PyangPlugin):
def add_output_format(self, fmts):
self.multiple_modules = True
fmts['omni'] = self
def add_opts(self, optparser):
optlist = [
optparse.make_option("--omni-path",
dest="omni_tree_path",
help="Subtree to print"),
]
g = optparser.add_option_group("OmniGraffle output specific options")
g.add_options(optlist)
def setup_fmt(self, ctx):
ctx.implicit_errors = False
def emit(self, ctx, modules, fd):
if ctx.opts.omni_tree_path is not None:
path = ctx.opts.omni_tree_path.split('/')
if path[0] == '':
path = path[1:]
else:
path = None
print_omni_header(modules, fd, path, ctx)
emit_modules(modules, fd, path, ctx)
post_process(fd, ctx)
print_omni_footer(modules, fd, path, ctx)
def print_omni_header(modules, fd, path, ctx):
# Build doc name from module names
name = ''
for m in modules:
name += m.arg
name = name[:32]
fd.write("""
tell application id "com.omnigroup.OmniGraffle6"
activate
make new document with properties {name:\"%s\"}
set bounds of window 1 to {50, 50, 1200, 800}
tell first canvas of document \"%s\"
set canvasSize to {600, 600}
set name to \"YANG Model\"
set adjusts pages to true
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {32.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "leafref"}, origin: {2403.202333, 169.219094}}
make new line at end of graphics with properties {point list: {{2513.245592418806, 185.5962102698529}, {2373.745592418806, 185.3149602698529}}, draws shadow: false, head type: "FilledArrow"}
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {105.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "Schema tree, containment"}, origin: {2397.741930, 138.863190}}
make new line at end of graphics with properties {point list: {{2374.993645107464, 154.4881903780727}, {2514.493645107464, 154.4881903780727}}, draws shadow: false, tail type: "FilledDiamond"}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 14.000000}, text: {alignment: center, font: "Helvetica-Bold", text: "Legend"}, text placement: top, origin: {2366.929155, 43.937008}, vertical padding: 0}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 56.000000}, text: {{color: {0.600000, 0.152941, 0.152941}, text: "Mandatory config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Optional config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Key leaf", underlined: true}, {color: {0.129412, 0.501961, 0.254902}, text: "
"}, {color: {0.549020, 0.486275, 0.133333}, text: "Not config"}}, text placement: top, origin: {2366.929155, 57.937008}, vertical padding: 0}
assemble graphics -2 through -1 table shape { 2, 1 }
assemble graphics -5 through -1
""" %(name, name))
def post_process(fd, ctx):
for s in leafrefs:
        # don't try to connect to a class not given as input to pyang
if s.strip().split(" to ")[1].split(" with ")[0] in paths_in_module:
fd.write(s)
def print_omni_footer(modules, fd, path, ctx):
fd.write("""
layout
end tell
end tell
""")
def print_module_info(module, fd, ctx):
title = module.arg
print_text(title, fd, ctx)
def emit_modules(modules, fd, path, ctx):
for module in modules:
print_module_info(module, fd, ctx)
chs = [ch for ch in module.i_children]
if path is not None and len(path) > 0:
chs = [ch for ch in chs
if ch.arg == path[0]]
path = path[1:]
# TEST
for ch in chs:
print_node(module, ch, module, fd, path, ctx, 'true')
for augment in module.search('augment'):
print_node(module, augment, module, fd, path, ctx, 'true')
def iterate_children(parent, s, module, fd, path, ctx):
if hasattr(s, 'i_children'):
for ch in s.i_children:
print_node(s, ch, module, fd, path, ctx)
def print_class_header(s, fd, ctx, root='false'):
global servicepoints
service = ""
for sub in s.substmts:
if sub.keyword[1] in servicepoints:
service = "SERVICE\n"
fd.write("make new shape at end of graphics with properties {autosizing: full, size: {187.500000, 14.000000}, text: {{alignment: center, font: \"Helvetica-Bold\", text: \"%s \"}, {alignment: center, color:%s, font: \"Helvetica-Bold\", text: \"%s \"}}, text placement: top, origin: {150.000000, 11.500000}, vertical padding: 0}\n" %(service + s.keyword, classnamecolor, s.arg))
def print_class_stuff(s, fd, ctx):
number = print_attributes(s, fd, ctx)
#print_actions(s,fd, ctx)
close_class(number, s, fd, ctx)
print_associations(s,fd, ctx)
def print_attributes(s,fd, ctx):
global key
if s.keyword == 'list':
keystring = s.search_one('key')
if keystring is not None:
key = keystring.arg.split(" ")
else:
key = ''
if hasattr(s, 'i_children'):
found_attrs = False
found_actions = False
index = False
# Search attrs
for ch in s.i_children:
index = False
if ch.keyword in ["leaf", "leaf-list"]:
if not found_attrs:
# first attr in attr section
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{")
found_attrs = True
else:
# comma before new textitem
fd.write(", ")
if ch.keyword == "leaf-list":
append = "[]"
else:
append = ""
if ch.arg in key:
index = True
print_leaf(ch, append, index, fd, ctx)
if found_attrs:
# close attr section
fd.write("}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# Search actions
for ch in s.i_children:
if ch.keyword == ('tailf-common', 'action'):
if not found_actions:
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{text:\"")
found_actions = True
print_action(ch, fd, ctx)
if found_actions:
fd.write("\"}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# return number of sections in class
return (found_attrs + found_actions) + 1
def close_class(number, s, fd, ctx):
fd.write("local %s\n" % fullpath(s))
fd.write("set %s to assemble ( graphics -%s through -1 ) table shape {%s, 1}\n"
% (fullpath(s), number, number))
def print_node(parent, s, module, fd, path, ctx, root='false'):
# We have a class
if s.keyword in class_keywords:
print_class_header(s, fd, ctx, root)
paths_in_module.append(fullpath(s))
print_class_stuff(s, fd, ctx)
# Do not try to create relationship to module
if parent != module:
presence = s.search_one("presence")
if presence is not None:
print_aggregation(parent, s, fd, "0", "1", ctx)
else:
print_aggregation(parent, s, fd, "1", "1", ctx)
iterate_children(parent, s, module, fd, path, ctx)
def print_associations(s, fd, ctx):
# find leafrefs and identityrefs
if hasattr(s, 'i_children'):
for ch in s.i_children:
if hasattr(ch, 'i_leafref_ptr') and (ch.i_leafref_ptr is not None):
to = ch.i_leafref_ptr[0]
print_association(s, to.parent, ch, to, "leafref", fd, ctx)
def print_aggregation(parent, this, fd, lower, upper, ctx):
fd.write("connect %s to %s with properties {%s} \n" %(fullpath(parent),fullpath(this), containsline))
def print_rpc(rpc, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(rpc), rpc.arg))
def print_action(action, fd, ctx, root='false'):
fd.write("%s()\n" %action.arg)
def print_notification(notification, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(notification), notification.arg))
def print_inout(parent, s, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s-%s\' " %(fullpath(s), parent.arg, s.keyword))
def print_leaf(leaf, append, index, fd, ctx):
if leaf.i_config:
c = '(rw)'
color = optionalconfig
else:
c = '(ro)'
color = notconfig
m = leaf.search_one('mandatory')
if m is None or m.arg == 'false':
mand = '?'
else:
mand = ''
color = mandatoryconfig
if not index:
fd.write("{font: \"Helvetica-Oblique\", color: %s, text: \"%s%s%s %s %s\n\"}"
% (color, leaf.arg, append, mand, c, get_typename(leaf)))
else:
fd.write("{font: \"Helvetica-Oblique\", color: %s, underlined: true, text: \"%s%s%s %s %s\n\"}"
% (color, leaf.arg, append, mand, c, get_typename(leaf)))
def print_association(fromclass, toclass, fromleaf, toleaf, association, fd, ctx):
leafrefs.append("connect " + (fullpath(fromclass)) + " to " + fullpath(toclass) + " with properties {" + leafrefline + "}\n", )
def print_text(t, fd, ctx):
fd.write("make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {57.000000, 30.000000}, text: {size: 16, alignment: center, font: \"HelveticaNeue\", text: \"%s\"}, origin: {100, 4.500000}}\n" %t)
def get_typename(s):
t = s.search_one('type')
if t is not None:
s = t.arg
# if t.arg == 'enumeration':
# s = s + ' : {'
# for enums in t.substmts[:10]:
# s = s + enums.arg + ','
# if len(t.substmts) > 3:
# s = s + "..."
# s = s + '}'
# elif t.arg == 'leafref':
# s = s + ' : '
# p = t.search_one('path')
# if p is not None:
# s = s + p.arg
return s
def fullpath(stmt):
pathsep = "_"
path = stmt.arg
# for augment paths we need to remove initial /
if path.startswith("/"):
path = path[1:]
else:
if stmt.keyword == 'case':
path = path + '-case'
elif stmt.keyword == 'grouping':
path = path + '-grouping'
while stmt.parent is not None:
stmt = stmt.parent
if stmt.arg is not None:
path = stmt.arg + pathsep + path
path = path.replace('-', '_')
path = path.replace(':', '_')
path = path.replace('/', '_')
return path
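# --- Hedged usage note (added for illustration; not part of the original plugin) ---
# Once pyang can find this plugin, the OmniGraffle AppleScript can be generated
# from the command line; the module name and subtree path below are placeholders:
#
#   pyang -f omni --omni-path /interfaces ietf-interfaces.yang > model.applescript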
|
isc
|
be68baeefe1ad69a4479aae3892de1f6
| 36.780952
| 380
| 0.580035
| 3.356176
| false
| false
| false
| false
|
mbj4668/pyang
|
pyang/yacc.py
|
1
|
137902
|
# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2019
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Latest version: https://github.com/dabeaz/ply
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what might be
# considered good Python "coding style."   Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
from __future__ import absolute_import # mbj: handle 'types' name collision
import re
import types
import sys
import os.path
import inspect
import warnings
__version__ = '3.11'
__tabversion__ = '3.10'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates a
# a 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
# String type-checking compatibility
if sys.version_info[0] < 3:
string_types = basestring # noqa: pyflakes on py3 doesn't know this
else:
string_types = str
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
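# Hedged example (added for illustration; not part of the original source): a
# user-supplied logging object can be handed to yacc in place of PlyLogger via
# the debuglog/errorlog keyword arguments documented by PLY.
#
#   import logging
#   logging.basicConfig(level=logging.DEBUG, filename='parser.log')
#   log = logging.getLogger()
#   parser = yacc.yacc(debug=True, debuglog=log, errorlog=log)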
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception):
pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit] + ' ...'
result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return '<%s @ 0x%x>' % (type(r).__name__, id(r))
# Panic mode error recovery support. This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
warnings.warn(_warnmsg)
return _errok()
def restart():
warnings.warn(_warnmsg)
return _restart()
def token():
warnings.warn(_warnmsg)
return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
global _errok, _token, _restart
_errok = parser.errok
_token = parser.token
_restart = parser.restart
r = errorfunc(token)
try:
del _errok, _token, _restart
except NameError:
pass
return r
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self):
return self.type
def __repr__(self):
return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self, s, stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser = None
def __getitem__(self, n):
if isinstance(n, slice):
return [s.value for s in self.slice[n]]
elif n >= 0:
return self.slice[n].value
else:
return self.stack[n].value
def __setitem__(self, n, v):
self.slice[n].value = v
def __getslice__(self, i, j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self, n):
return getattr(self.slice[n], 'lineno', 0)
def set_lineno(self, n, lineno):
self.slice[n].lineno = lineno
def linespan(self, n):
startline = getattr(self.slice[n], 'lineno', 0)
endline = getattr(self.slice[n], 'endlineno', startline)
return startline, endline
def lexpos(self, n):
return getattr(self.slice[n], 'lexpos', 0)
def set_lexpos(self, n, lexpos):
self.slice[n].lexpos = lexpos
def lexspan(self, n):
startpos = getattr(self.slice[n], 'lexpos', 0)
endpos = getattr(self.slice[n], 'endlexpos', startpos)
return startpos, endpos
def error(self):
raise SyntaxError
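# Hedged example (added for illustration; not part of the original source): a
# typical grammar rule receives a YaccProduction as `p`; index 0 is the result
# slot and indices 1..N are the right-hand-side symbols.
#
#   def p_expression_plus(p):
#       'expression : expression PLUS term'
#       p[0] = p[1] + p[3]      # __getitem__/__setitem__ on the slice above
#       line = p.lineno(2)      # line number of the PLUS token (0 if unset)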
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self, lrtab, errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
self.set_defaulted_states()
self.errorok = True
def errok(self):
self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
    # For such states, the parser can choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
def disable_defaulted_states(self):
self.defaulted_states = {}
def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug, int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
elif tracking:
return self.parseopt(input, lexer, debug, tracking, tokenfunc)
else:
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parsedebug-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
#--! DEBUG
debug.info('PLY: PARSE DEBUG START')
#--! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
#--! DEBUG
debug.debug('')
debug.debug('State : %s', state)
#--! DEBUG
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
#--! DEBUG
debug.debug('Defaulted state %s: Reduce using %d', state, -t)
#--! DEBUG
#--! DEBUG
debug.debug('Stack : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
#--! DEBUG
debug.debug('Action : Shift and goto state %s', t)
#--! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
#--! DEBUG
if plen:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
'['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
goto[statestack[-1-plen]][pname])
else:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
goto[statestack[-1]][pname])
#--! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
#--! DEBUG
debug.info('Done : Returning %s', format_result(result))
debug.info('PLY: PARSE DEBUG END')
#--! DEBUG
return result
if t is None:
#--! DEBUG
debug.error('Error : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-notrack-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are also computed automatically:
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
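# Illustrative sketch (not part of the original module; the rule name, file and
# line number below are hypothetical). For a grammar rule written as
#
#     def p_expr_plus(p):
#         'expr : expr PLUS term'
#         p[0] = p[1] + p[3]
#
# the corresponding Production object would look roughly like this:
#
#     p = Production(number=1, name='expr', prod=['expr', 'PLUS', 'term'],
#                    func='p_expr_plus', file='calc.py', line=12)
#     p.len        # 3 symbols on the right hand side
#     p.usyms      # ['expr', 'PLUS', 'term']
#     str(p)       # 'expr -> expr PLUS term'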
class Production(object):
reduced = 0
def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = []
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
self.str = '%s -> <empty>' % self.name
def __str__(self):
return self.str
def __repr__(self):
return 'Production(' + str(self) + ')'
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self, index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self, n):
if n > len(self.prod):
return None
p = LRItem(self, n)
# Precompute the list of productions immediately following.
try:
p.lr_after = self.Prodnames[p.prod[n+1]]
except (IndexError, KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self, str, name, len, func, file, line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return 'MiniProduction(%s)' % self.str
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next    - Next LR item. For example, if we are 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
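# Illustrative sketch (not part of the original module): the LR items generated
# for the production 'expr -> expr PLUS term' are, in order,
#
#     expr -> . expr PLUS term      # lr_index == 0
#     expr -> expr . PLUS term      # lr_index == 1
#     expr -> expr PLUS . term      # lr_index == 2
#     expr -> expr PLUS term .      # lr_index == 3
#
# For example, LRItem(p, 1) for that production has prod equal to
# ('expr', '.', 'PLUS', 'term') and len == 4 (the '.' is counted).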
class LRItem(object):
def __init__(self, p, n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = {}
self.prod.insert(n, '.')
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
s = '%s -> <empty>' % self.name
return s
def __repr__(self):
return 'LRItem(' + str(self) + ')'
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
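# Illustrative sketch (not part of the original module): assuming PLUS and
# NUMBER are the only terminals in `terminals`,
#
#     rightmost_terminal(['expr', 'PLUS', 'NUMBER'], terminals)  # -> 'NUMBER'
#     rightmost_terminal(['expr', 'PLUS', 'term'], terminals)    # -> 'PLUS'
#     rightmost_terminal(['expr', 'term'], terminals)            # -> None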
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
pass
class Grammar(object):
def __init__(self, terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = {} # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = {} # A dictionary of precomputed FIRST(x) symbols
self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
        self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self, index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
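    # Illustrative sketch (not part of the original module): this mirrors a
    # typical 'precedence' specification such as
    #
    #     precedence = (('left', 'PLUS', 'MINUS'),
    #                   ('left', 'TIMES', 'DIVIDE'))
    #
    # which yacc() turns into calls along the lines of
    #
    #     g.set_precedence('PLUS',   'left', 1)
    #     g.set_precedence('MINUS',  'left', 1)
    #     g.set_precedence('TIMES',  'left', 2)
    #     g.set_precedence('DIVIDE', 'left', 2)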
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
    # Precedence is determined by the precedence of the right-most terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
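    # Illustrative sketch (not part of the original module; the function name,
    # file and line number are hypothetical). A rule written as
    # 'expr : expr PLUS term' would typically be added as
    #
    #     g.add_production('expr', ['expr', 'PLUS', 'term'],
    #                      func='p_expr_plus', file='calc.py', line=12)
    #
    # and, with PLUS declared 'left' at level 1, the resulting production gets
    # prec == ('left', 1) because PLUS is the right-most terminal in the rule.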
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError('start symbol %s undefined' % start)
self.Productions[0] = Production(0, "S'", [start])
self.Nonterminals[start].append(0)
self.Start = start
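    # Illustrative sketch (not part of the original module): a minimal manual
    # use of this class, assuming the token names below, looks roughly like
    #
    #     g = Grammar(['PLUS', 'NUMBER'])
    #     g.add_production('expr', ['expr', 'PLUS', 'term'])
    #     g.add_production('expr', ['term'])
    #     g.add_production('term', ['NUMBER'])
    #     g.set_start('expr')          # Production 0 becomes S' -> expr
    #     g.compute_first()
    #     g.compute_follow()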
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
    # Find all symbols that were used in the grammar, but not defined as tokens or
    # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
    # During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
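    # Illustrative sketch (not part of the original module): for the small
    # grammar sketched above (expr : expr PLUS term | term and term : NUMBER),
    # compute_first() would yield, among others,
    #
    #     First['term'] == ['NUMBER']
    #     First['expr'] == ['NUMBER']
    #
    # because every derivation of expr ultimately starts with a NUMBER token.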
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
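    # Illustrative sketch (not part of the original module): continuing the
    # small grammar above with start symbol 'expr',
    #
    #     Follow['expr'] == ['$end', 'PLUS']
    #     Follow['term'] == ['$end', 'PLUS']
    #
    # since an expr may be followed by the end of input or by a PLUS token,
    # and a term only appears at the end of an expr production.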
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
    # LR items. The LR items are stored in two ways: first, each production keeps
    # its own list of LR items in its lr_items attribute. Second, a linked list of
    # LR items is built for each production via the lr_next attribute. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while True:
if i > len(p):
lri = None
else:
lri = LRItem(p, i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError, KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri:
break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self, module):
if isinstance(module, types.ModuleType):
parsetab = module
else:
exec('import %s' % module)
parsetab = sys.modules[module]
if parsetab._tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self, filename):
try:
import cPickle as pickle
except ImportError:
import pickle
if not os.path.exists(filename):
raise ImportError
in_f = open(filename, 'rb')
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
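# Illustrative sketch (not part of the original module): with the hypothetical
# inputs
#
#     X  = ['a', 'b']
#     R  = lambda x: {'a': ['b'], 'b': []}[x]
#     FP = lambda x: {'a': ['x'], 'b': ['y']}[x]
#
# digraph(X, R, FP) returns {'a': ['x', 'y'], 'b': ['y']}: the value for 'a'
# is F'('a') plus everything reachable from 'a' through the relation R.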
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
class LALRError(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
if method not in ['SLR', 'LALR']:
raise LALRError('Unsupported method %s' % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
        # Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = True
while didadd:
didadd = False
for j in J:
for x in j.lr_after:
if getattr(x, 'lr0_added', 0) == self._add_count:
continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = True
return J
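    # Illustrative sketch (not part of the original module): for the augmented
    # grammar S' -> expr with expr : expr PLUS term | term and term : NUMBER,
    # the closure of [S' -> . expr] also contains
    #
    #     expr -> . expr PLUS term
    #     expr -> . term
    #     term -> . NUMBER
    #
    # because a dot in front of a non-terminal pulls in that non-terminal's
    # productions (via the precomputed lr_after lists).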
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I), x))
if g:
return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x)
if not s:
s = {}
self.lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n))
if not s1:
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end')
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I), x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = {}
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I, x)
if not g or id(g) in self.lr0_cidhash:
continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
    # The method used here is due to DeRemer and Pennello (1982).
#
    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
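    # Illustrative sketch (not part of the original module): given hypothetical
    # rules such as
    #
    #     opt_sign : PLUS
    #              | MINUS
    #              |                  (an empty production)
    #
    # 'opt_sign' ends up in the nullable set, and so does any non-terminal that
    # has a production consisting only of nullable symbols.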
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
    # Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state, N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N:
continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j, t) in dtrans:
                        # Yes. Okay, there is some chance that this is an includes relation;
                        # the only way to know for certain is whether the rest of the
                        # production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals:
break # No forget it
if p.prod[li] not in nullable:
break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j, t))
g = self.lr0_goto(C[j], t) # Go to next set
j = self.lr0_cidhash.get(id(g), -1) # Go to next state
                # When we get here, j is the final state; now we have to locate the production
for r in C[j]:
if r.name != p.name:
continue
if r.len != p.len:
continue
i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]:
break
i = i + 1
else:
lookb.append((j, r))
for i in includes:
if i not in includedict:
includedict[i] = []
includedict[i].append((state, N))
lookdict[(state, N)] = lookb
return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
FP = lambda x: self.dr_relation(C, x, nullable)
R = lambda x: self.reads_relation(C, x, nullable)
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x, [])
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
for trans, lb in lookbacks.items():
# Loop over productions in lookback
for state, p in lb:
if state not in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans, [])
for a in f:
if a not in p.lookaheads[state]:
p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C, trans, nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C, trans, nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans, readsets, included)
# Add all of the lookaheads
self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = {} # Action production array (temporary)
log.info('Parsing method: %s', self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [] # List of actions
st_action = {}
st_actionp = {}
st_goto = {}
log.info('')
log.info('state %d', st)
log.info('')
for p in I:
log.info(' (%d) %s', p.number, p)
log.info('')
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action['$end'] = 0
st_actionp['$end'] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from rule being reduced (p)
rprec, rlevel = Productions[p.number].prec
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp, rejectp = pp, oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp, rejectp = oldp, pp
self.rr_conflicts.append((st, chosenp, rejectp))
log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
a, st_actionp[a].number, st_actionp[a])
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I, a)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
# We are in a shift state
actlist.append((a, p, 'shift and go to state %d' % j))
r = st_action.get(a)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError('Shift/shift conflict in state %d' % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from the rule that could have been reduced
rprec, rlevel = Productions[st_actionp[a].number].prec
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = {}
for a, p, m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(' %-15s %s', a, m)
_actprint[(a, m)] = 1
log.info('')
# Print the actions that were not used. (debugging)
not_used = 0
for a, p, m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a, m) in _actprint:
log.debug(' ! %-15s [ %s ]', a, m)
not_used = 1
_actprint[(a, m)] = 1
if not_used:
log.debug('')
# Construct the goto table for this state
nkeys = {}
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I, n)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
st_goto[n] = j
log.info(' %-30s shift and go to state %d', n, j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
if isinstance(tabmodule, types.ModuleType):
raise IOError("Won't overwrite existing tabmodule")
basemodulename = tabmodule.split('.')[-1]
filename = os.path.join(outputdir, basemodulename) + '.py'
try:
f = open(filename, 'w')
f.write('''
# %s
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = {}
for s, nd in self.lr_action.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_action_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
''')
else:
f.write('\n_lr_action = { ')
for k, v in self.lr_action.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
if smaller:
# Factor out names to try and make smaller
items = {}
for s, nd in self.lr_goto.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_goto_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
''')
else:
f.write('\n_lr_goto = { ')
for k, v in self.lr_goto.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
# Write production table
f.write('_lr_productions = [\n')
for p in self.lr_productions:
if p.func:
f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
p.func, os.path.basename(p.file), p.line))
else:
f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
f.write(']\n')
f.close()
except IOError as e:
raise
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
try:
import cPickle as pickle
except ImportError:
import pickle
with open(filename, 'wb') as outf:
pickle.dump(__tabversion__, outf, pickle_protocol)
pickle.dump(self.lr_method, outf, pickle_protocol)
pickle.dump(signature, outf, pickle_protocol)
pickle.dump(self.lr_action, outf, pickle_protocol)
pickle.dump(self.lr_goto, outf, pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
else:
outp.append((str(p), p.name, p.len, None, None, None))
pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p:
continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
grammar.append((file, dline, prodname, syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
return grammar
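# Illustrative sketch (not part of the original module; the file name and line
# number are hypothetical). For a docstring taken from a rule function defined
# at line 10 of calc.py,
#
#     doc = '''expr : expr PLUS term
#              | term'''
#     parse_grammar(doc, 'calc.py', 10)
#
# returns
#
#     [('calc.py', 11, 'expr', ['expr', 'PLUS', 'term']),
#      ('calc.py', 12, 'expr', ['term'])]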
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
parts = []
try:
if self.start:
parts.append(self.start)
if self.prec:
parts.append(''.join([''.join(p) for p in self.prec]))
if self.tokens:
parts.append(' '.join(self.tokens))
for f in self.pfuncs:
if f[3]:
parts.append(f[3])
except (TypeError, ValueError):
pass
return ''.join(parts)
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
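        # Illustrative sketch (not part of the original module): the pattern
        # above matches lines such as
        #
        #     '    def p_expression_plus(p):'
        #
        # and captures 'p_expression_plus' in group(1), which is what gets
        # counted below to detect duplicate rule functions.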
for module in self.modules:
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
continue
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start, string_types):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func, types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = True
return
eline = self.error_func.__code__.co_firstlineno
efile = self.error_func.__code__.co_filename
module = inspect.getmodule(self.error_func)
self.modules.add(module)
argcount = self.error_func.__code__.co_argcount - ismethod
if argcount != 1:
self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
self.error = True
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get('tokens')
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = sorted(tokens)
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = True
return
terminals = set()
for n in self.tokens:
if n in terminals:
self.log.warning('Token %r multiply defined', n)
terminals.add(n)
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get('precedence')
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec, (list, tuple)):
self.log.error('precedence must be a list or tuple')
self.error = True
return
for level, p in enumerate(self.prec):
if not isinstance(p, (list, tuple)):
self.log.error('Bad precedence table')
self.error = True
return
if len(p) < 2:
self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
self.error = True
return
assoc = p[0]
if not isinstance(assoc, string_types):
self.log.error('precedence associativity must be a string')
self.error = True
return
for term in p[1:]:
if not isinstance(term, string_types):
self.log.error('precedence items must be strings')
self.error = True
return
preclist.append((term, assoc, level+1))
self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
# Sort all of the actions by line number; make sure to stringify
# modules to make them sortable, since `line` may not uniquely sort all
# p functions
p_functions.sort(key=lambda p_function: (
p_function[0],
str(p_function[1]),
p_function[2],
p_function[3]))
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error('no rules of the form p_rulename are defined')
self.error = True
return
for line, module, name, doc in self.pfuncs:
file = inspect.getsourcefile(module)
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func.__code__.co_argcount > reqargs:
self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
self.error = True
elif func.__code__.co_argcount < reqargs:
self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
self.error = True
elif not func.__doc__:
self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
file, line, func.__name__)
else:
try:
parsed_g = parse_grammar(doc, file, line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError as e:
self.log.error(str(e))
self.error = True
# Looks like a valid grammar rule
# Mark the file in which defined.
self.modules.add(module)
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n, v in self.pdict.items():
if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
continue
if n.startswith('t_'):
continue
if n.startswith('p_') and n != 'p_error':
self.log.warning('%r not defined as a function', n)
if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
(isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
if v.__doc__:
try:
doc = v.__doc__.split(' ')
if doc[1] == ':':
self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
v.__code__.co_filename, v.__code__.co_firstlineno, n)
except IndexError:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
outputdir=None, debuglog=None, errorlog=None, picklefile=None):
if tabmodule is None:
tabmodule = tab_module
# Reference to the parsing method of the last built parser
global parse
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
pdict = dict(_items)
# If no __file__ or __package__ attributes are available, try to obtain them
# from the __module__ instead
if '__file__' not in pdict:
pdict['__file__'] = sys.modules[pdict['__module__']].__file__
if '__package__' not in pdict and '__module__' in pdict:
if hasattr(sys.modules[pdict['__module__']], '__package__'):
pdict['__package__'] = sys.modules[pdict['__module__']].__package__
else:
pdict = get_caller_module_dict(2)
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If tabmodule specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(tabmodule, types.ModuleType):
srcfile = tabmodule.__file__
else:
if '.' not in tabmodule:
srcfile = pdict['__file__']
else:
parts = tabmodule.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
    # Determine if the module providing the grammar is part of a package or not.
# If so, fix the tabmodule setting so that tables load correctly
pkg = pdict.get('__package__')
if pkg and isinstance(tabmodule, str):
if '.' not in tabmodule:
tabmodule = pkg + '.' + tabmodule
# Set start symbol if it's specified directly using an argument
if start is not None:
pdict['start'] = start
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict, log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError('Unable to build parser')
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
except Exception as e:
errorlog.warning('There was a problem loading the table file: %r', e)
except VersionError as e:
errorlog.warning(str(e))
except ImportError:
pass
if debuglog is None:
if debug:
try:
debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
except IOError as e:
errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
debuglog = NullLogger()
else:
debuglog = NullLogger()
debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
errors = False
# Validate the parser information
if pinfo.validate_all():
raise YaccError('Unable to build parser')
if not pinfo.error_func:
errorlog.warning('no p_error() function is defined')
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term, assoc, level)
except GrammarError as e:
errorlog.warning('%s', e)
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname, syms, funcname, file, line)
except GrammarError as e:
errorlog.error('%s', e)
errors = True
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError as e:
errorlog.error(str(e))
errors = True
if errors:
raise YaccError('Unable to build parser')
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
errors = True
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info('')
debuglog.info('Unused terminals:')
debuglog.info('')
for term in unused_terminals:
errorlog.warning('Token %r defined, but not used', term)
debuglog.info(' %s', term)
# Print out all productions to the debug log
if debug:
debuglog.info('')
debuglog.info('Grammar')
debuglog.info('')
for n, p in enumerate(grammar.Productions):
debuglog.info('Rule %-5d %s', n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning('There is 1 unused token')
if len(unused_terminals) > 1:
errorlog.warning('There are %d unused tokens', len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning('There is 1 unused rule')
if len(unused_rules) > 1:
errorlog.warning('There are %d unused rules', len(unused_rules))
if debug:
debuglog.info('')
debuglog.info('Terminals, with rules where they appear')
debuglog.info('')
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
debuglog.info('')
debuglog.info('Nonterminals, with rules where they appear')
debuglog.info('')
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info('')
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning('Symbol %r is unreachable', u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error('Infinite recursion detected for symbol %r', inf)
errors = True
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
errors = True
if errors:
raise YaccError('Unable to build parser')
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug('Generating %s tables', method)
lr = LRGeneratedTable(grammar, method, debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning('1 shift/reduce conflict')
elif num_sr > 1:
errorlog.warning('%d shift/reduce conflicts', num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning('1 reduce/reduce conflict')
elif num_rr > 1:
errorlog.warning('%d reduce/reduce conflicts', num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning('')
debuglog.warning('Conflicts:')
debuglog.warning('')
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
already_reported = set()
for state, rule, rejected in lr.rr_conflicts:
if (state, id(rule), id(rejected)) in already_reported:
continue
debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
debuglog.warning('rejected rule (%s) in state %d', rejected, state)
errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
errorlog.warning('rejected rule (%s) in state %d', rejected, state)
already_reported.add((state, id(rule), id(rejected)))
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning('Rule (%s) is never reduced', rejected)
errorlog.warning('Rule (%s) is never reduced', rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
try:
lr.write_table(tabmodule, outputdir, signature)
if tabmodule in sys.modules:
del sys.modules[tabmodule]
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
# Write a pickled version of the tables
if picklefile:
try:
lr.pickle_table(picklefile, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
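# Illustrative usage sketch (not part of the original source). A grammar module
# normally imports PLY, declares its token list, writes p_* rules whose docstrings
# hold the productions, and then calls yacc(); the lexer supplying the NUMBER and
# PLUS tokens below is assumed to exist elsewhere:
#
#     import ply.yacc as yacc
#     tokens = ('NUMBER', 'PLUS')
#     def p_expr_plus(p):
#         'expr : expr PLUS NUMBER'
#         p[0] = p[1] + p[3]
#     def p_expr_number(p):
#         'expr : NUMBER'
#         p[0] = p[1]
#     def p_error(p):
#         print('Syntax error at %r' % (p,))
#     parser = yacc.yacc()
#     result = parser.parse('1 + 2', lexer=calc_lexer)   # -> 3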
|
isc
|
97644d51771f08d4c571307763fe819e
| 38.344365
| 119
| 0.46789
| 4.801267
| false
| false
| false
| false
|
rdegges/django-twilio
|
django_twilio/models.py
|
1
|
1827
|
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from phonenumber_field.modelfields import PhoneNumberField
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Caller(models.Model):
"""
A caller is defined uniquely by their phone number.
:param bool blacklisted: Designates whether the caller can use our
services.
:param char phone_number: Unique phone number in `E.164
<http://en.wikipedia.org/wiki/E.164>`_ format.
"""
blacklisted = models.BooleanField(default=False)
phone_number = PhoneNumberField(unique=True)
def __str__(self):
return '{phone_number}{blacklist_status}'.format(
phone_number=str(self.phone_number),
blacklist_status=' (blacklisted)' if self.blacklisted else '',
)
class Meta:
app_label = 'django_twilio'
class Credential(models.Model):
"""
A Credential model is a set of SID / AUTH tokens for the Twilio.com API
The Credential model can be used if a project uses more than one
Twilio account, or provides Users with access to Twilio powered
web apps that need their own custom credentials.
:param char name: The name used to distinguish this credential
:param char account_sid: The Twilio account_sid
:param char auth_token: The Twilio auth_token
:param key user: The user linked to this Credential
"""
def __str__(self):
return '{name} - {sid}'.format(name=self.name, sid=self.account_sid)
name = models.CharField(max_length=30)
user = models.OneToOneField(AUTH_USER_MODEL, on_delete=models.CASCADE)
account_sid = models.CharField(max_length=34)
auth_token = models.CharField(max_length=32)
class Meta:
app_label = 'django_twilio'
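# Illustrative sketch (not part of the original module). A Credential row is
# typically created per user with that user's Twilio keys; `some_user` and the
# SID/token values below are placeholders, not real credentials:
#     Credential.objects.create(
#         name='Primary account',
#         user=some_user,
#         account_sid='AC' + '0' * 32,
#         auth_token='0' * 32,
#     )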
|
unlicense
|
4348298ef46615125a833429bcd3cae9
| 28.467742
| 76
| 0.675424
| 3.822176
| false
| false
| false
| false
|
rdegges/django-twilio
|
test_project/test_app/models.py
|
1
|
1869
|
# -*- coding: utf-8 -*-
from types import MethodType
from django.test import TestCase
from django.contrib.auth.models import User
from django_dynamic_fixture import G
from django_twilio.models import Caller, Credential
class CallerTestCase(TestCase):
"""
Run tests against the :class:`django_twilio.models.Caller` model.
"""
def setUp(self):
self.caller = G(
Caller,
phone_number='+15005550000',
blacklisted=False,
)
def test_has_str_method(self):
self.assertIsInstance(self.caller.__str__, MethodType)
def test_str_returns_a_string(self):
self.assertIsInstance(self.caller.__str__(), str)
def test_str_doesnt_contain_blacklisted(self):
self.assertNotIn('blacklisted', self.caller.__str__())
def test_unicode_contains_blacklisted(self):
self.caller.blacklisted = True
self.caller.save()
self.assertIn('blacklisted', self.caller.__str__())
class CredentialTests(TestCase):
def setUp(self):
self.user = G(User, username='test', password='pass')
self.credentials = G(
Credential,
name='Test Credentials',
account_sid='XXX',
auth_token='YYY',
user=self.user,
)
def test_str(self):
"""
Assert that str renders how we'd like it too
"""
self.assertEqual(
self.credentials.__str__(),
'Test Credentials - XXX',
)
def test_credentials_fields(self):
"""
Assert the fields are working correctly
"""
self.assertEqual(self.credentials.name, 'Test Credentials')
self.assertEqual(self.credentials.account_sid, 'XXX')
self.assertEqual(self.credentials.auth_token, 'YYY')
self.assertEqual(self.credentials.user, self.user)
|
unlicense
|
9a7784fab1c7082497e409f11fdfa3da
| 26.485294
| 69
| 0.608347
| 4.134956
| false
| true
| false
| false
|
pikepdf/pikepdf
|
src/pikepdf/_cpphelpers.py
|
1
|
2965
|
# SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Support functions called by the C++ library binding layer.
Not intended to be called from Python, and subject to change at any time.
"""
from __future__ import annotations
from typing import Callable
from warnings import warn
from pikepdf import Dictionary, Name, Pdf
def update_xmp_pdfversion(pdf: Pdf, version: str) -> None:
"""Update XMP metadata to specified PDF version."""
if Name.Metadata not in pdf.Root:
return # Don't create an empty XMP object just to store the version
with pdf.open_metadata(set_pikepdf_as_editor=False, update_docinfo=False) as meta:
if 'pdf:PDFVersion' in meta:
meta['pdf:PDFVersion'] = version
def _alpha(n: int) -> str:
"""Excel-style column numbering A..Z, AA..AZ..BA..ZZ.., AAA."""
if n < 1:
raise ValueError(f"Can't represent {n} in alphabetic numbering")
p = []
while n > 0:
n, r = divmod(n - 1, 26)
p.append(r)
base = ord('A')
ords = [(base + v) for v in reversed(p)]
return ''.join(chr(o) for o in ords)
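# Illustrative note (not part of the original module): _alpha follows Excel-style
# column numbering, so for example
#     _alpha(1) == 'A', _alpha(26) == 'Z', _alpha(27) == 'AA', _alpha(703) == 'AAA'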
def _roman(n: int) -> str:
"""Convert integer n to Roman numeral representation as a string."""
if not (1 <= n <= 5000):
raise ValueError(f"Can't represent {n} in Roman numerals")
roman_numerals = (
(1000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
)
roman = ""
for value, numeral in roman_numerals:
while n >= value:
roman += numeral
n -= value
return roman
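# Illustrative note (not part of the original module): the greedy subtraction above
# yields standard numerals, e.g. _roman(4) == 'IV' and _roman(1998) == 'MCMXCVIII'.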
LABEL_STYLE_MAP: dict[Name, Callable[[int], str]] = {
Name.D: str,
Name.A: _alpha,
Name.a: lambda x: _alpha(x).lower(),
Name.R: _roman,
Name.r: lambda x: _roman(x).lower(),
}
def label_from_label_dict(label_dict: int | Dictionary) -> str:
"""Convert a label dictionary returned by QPDF into a text string."""
if isinstance(label_dict, int):
return str(label_dict)
label = ''
if Name.P in label_dict:
prefix = label_dict[Name.P]
label += str(prefix)
# If there is no S, return only the P portion
if Name.S in label_dict:
# St defaults to 1
numeric_value = label_dict[Name.St] if Name.St in label_dict else 1
if not isinstance(numeric_value, int):
warn(
"Page label dictionary has invalid non-integer start value", UserWarning
)
numeric_value = 1
style = label_dict[Name.S]
if isinstance(style, Name):
style_fn = LABEL_STYLE_MAP[style]
value = style_fn(numeric_value)
label += value
else:
warn("Page label dictionary has invalid page label style", UserWarning)
return label
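# Illustrative sketch (not part of the original module), assuming pikepdf's
# keyword Dictionary constructor:
#     label_from_label_dict(7)                                   # -> '7'
#     label_from_label_dict(Dictionary(S=Name.D, St=5, P='A-'))  # -> 'A-5'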
|
mpl-2.0
|
0089edf3fde570663fa1b10f4c4e07c2
| 27.509615
| 88
| 0.571669
| 3.451688
| false
| false
| false
| false
|
pikepdf/pikepdf
|
tests/test_parsers.py
|
1
|
9538
|
# SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: CC0-1.0
from __future__ import annotations
import shutil
import sys
from subprocess import PIPE, run
import pytest
import pikepdf
from pikepdf import (
ContentStreamInlineImage,
ContentStreamInstruction,
Dictionary,
Name,
Object,
Operator,
Pdf,
PdfError,
PdfInlineImage,
PdfMatrix,
Stream,
_qpdf,
parse_content_stream,
unparse_content_stream,
)
from pikepdf._qpdf import StreamParser
from pikepdf.models import PdfParsingError
# pylint: disable=useless-super-delegation,redefined-outer-name
@pytest.fixture
def graph(resources):
yield Pdf.open(resources / 'graph.pdf')
@pytest.fixture
def inline(resources):
yield Pdf.open(resources / 'image-mono-inline.pdf')
class PrintParser(StreamParser):
def __init__(self):
super().__init__()
def handle_object(self, obj, *_args):
print(repr(obj))
def handle_eof(self):
print("--EOF--")
class ExceptionParser(StreamParser):
def __init__(self):
super().__init__()
def handle_object(self, obj, *_args): # pylint: disable=unused-argument
raise ValueError("I take exception to this")
def handle_eof(self):
print("--EOF--")
def slow_unparse_content_stream(instructions):
def encode(obj):
return _qpdf.unparse(obj)
def encode_iimage(iimage: PdfInlineImage):
return iimage.unparse()
def encode_operator(obj):
if isinstance(obj, Operator):
return obj.unparse()
return encode(Operator(obj))
def for_each_instruction():
for n, (operands, operator) in enumerate(instructions):
try:
if operator == Operator(b'INLINE IMAGE'):
iimage = operands[0]
if not isinstance(iimage, PdfInlineImage):
raise ValueError(
"Operator was INLINE IMAGE but operands were not "
"a PdfInlineImage"
)
line = encode_iimage(iimage)
else:
if operands:
line = b' '.join(encode(operand) for operand in operands)
line += b' ' + encode_operator(operator)
else:
line = encode_operator(operator)
except (PdfError, ValueError) as e:
raise PdfParsingError(line=n + 1) from e
yield line
return b'\n'.join(for_each_instruction())
def test_open_pdf(graph):
page = graph.pages[0]
Object._parse_stream(page.obj, PrintParser())
def test_parser_exception(graph):
stream = graph.pages[0]['/Contents']
with pytest.raises(ValueError):
Object._parse_stream(stream, ExceptionParser())
@pytest.mark.skipif(shutil.which('pdftotext') is None, reason="poppler not installed")
def test_text_filter(resources, outdir):
input_pdf = resources / 'veraPDF test suite 6-2-10-t02-pass-a.pdf'
    # Ensure the test PDF has text we can find
proc = run(
['pdftotext', str(input_pdf), '-'], check=True, stdout=PIPE, encoding='utf-8'
)
assert proc.stdout.strip() != '', "Need input test file that contains text"
with Pdf.open(input_pdf) as pdf:
page = pdf.pages[0]
keep = []
for operands, command in parse_content_stream(
page, """TJ Tj ' " BT ET Td TD Tm T* Tc Tw Tz TL Tf Tr Ts"""
):
if command == Operator('Tj'):
print("skipping Tj")
continue
keep.append((operands, command))
new_stream = Stream(pdf, pikepdf.unparse_content_stream(keep))
print(new_stream.read_bytes()) # pylint: disable=no-member
page['/Contents'] = new_stream
page['/Rotate'] = 90
pdf.save(outdir / 'notext.pdf', static_id=True)
proc = run(
['pdftotext', str(outdir / 'notext.pdf'), '-'],
check=True,
stdout=PIPE,
encoding='utf-8',
)
assert proc.stdout.strip() == '', "Expected text to be removed"
def test_invalid_stream_object():
with pytest.raises(TypeError, match="must be a pikepdf.Object"):
parse_content_stream(42)
with pytest.raises(TypeError, match="called on page or stream"):
parse_content_stream(Dictionary({"/Hi": 3}))
with pytest.raises(
TypeError, match="parse_content_stream called on non-stream Object"
):
false_page = Dictionary(Type=Name.Page, Contents=42)
parse_content_stream(false_page)
# @pytest.mark.parametrize(
# "test_file,expected",
# [
# ("fourpages.pdf", True),
# ("graph.pdf", False),
# ("veraPDF test suite 6-2-10-t02-pass-a.pdf", True),
# ("veraPDF test suite 6-2-3-3-t01-fail-c.pdf", False),
# ('sandwich.pdf', True),
# ],
# )
# def test_has_text(resources, test_file, expected):
# with Pdf.open(resources / test_file) as pdf:
# for p in pdf.pages:
# page = p
# assert page.has_text() == expected
def test_unparse_cs():
instructions = [
([], Operator('q')),
([*PdfMatrix.identity().shorthand], Operator('cm')),
([], Operator('Q')),
]
assert unparse_content_stream(instructions).strip() == b'q\n1 0 0 1 0 0 cm\nQ'
def test_unparse_failure():
instructions = [([float('nan')], Operator('cm'))]
with pytest.raises(PdfParsingError):
unparse_content_stream(instructions)
def test_parse_xobject(resources):
with Pdf.open(resources / 'formxobject.pdf') as pdf:
form1 = pdf.pages[0].Resources.XObject.Form1
instructions = parse_content_stream(form1)
assert instructions[0][1] == Operator('cm')
def test_parse_results(inline):
p0 = inline.pages[0]
cmds = parse_content_stream(p0)
assert isinstance(cmds[0], ContentStreamInstruction)
csi = cmds[0]
assert isinstance(csi.operands, _qpdf._ObjectList)
assert isinstance(csi.operator, Operator)
assert 'Operator' in repr(csi)
assert ContentStreamInstruction(cmds[0]).operator == cmds[0].operator
for cmd in cmds:
if isinstance(cmd, ContentStreamInlineImage):
assert cmd.operator == Operator("INLINE IMAGE")
assert isinstance(cmd.operands[0], PdfInlineImage)
assert 'INLINE' in repr(cmd)
assert cmd.operands[0] == cmd.iimage
def test_build_instructions():
cs = ContentStreamInstruction([1, 0, 0, 1, 0, 0], Operator('cm'))
assert 'cm' in repr(cs)
assert unparse_content_stream([cs]) == b'1 0 0 1 0 0 cm'
def test_unparse_interpret_operator():
commands = []
matrix = [2, 0, 0, 2, 0, 0]
commands.insert(0, (matrix, 'cm'))
commands.insert(0, (matrix, b'cm'))
commands.insert(0, (matrix, Operator('cm')))
unparsed = unparse_content_stream(commands)
assert (
unparsed
== b'2 0 0 2 0 0 cm\n2 0 0 2 0 0 cm\n2 0 0 2 0 0 cm'
== slow_unparse_content_stream(commands)
)
def test_unparse_inline(inline):
p0 = inline.pages[0]
cmds = parse_content_stream(p0)
unparsed = unparse_content_stream(cmds)
assert b'BI' in unparsed
assert unparsed == slow_unparse_content_stream(cmds)
def test_unparse_invalid_inline_image():
instructions = [((42,), Operator(b'INLINE IMAGE'))]
with pytest.raises(PdfParsingError):
unparse_content_stream(instructions)
def test_inline_copy(inline):
for instr in parse_content_stream(inline.pages[0].Contents):
if not isinstance(instr, ContentStreamInlineImage):
continue
csiimage = instr
_copy_of_csiimage = ContentStreamInlineImage(csiimage)
new_iimage = ContentStreamInlineImage(csiimage.iimage)
assert unparse_content_stream([new_iimage]).startswith(b'BI')
def test_end_inline_parse():
pdf = pikepdf.new()
pdf.add_blank_page(page_size=(1000, 1000))
stream = b"""
q 200 0 0 200 500 500 cm
BI
/W 1
/H 1
/BPC 8
/CS /RGB
ID \x80\x80\x80
EI Q
q 300 0 0 300 500 200 cm
BI
/W 2
/H 2
/BPC 8
/CS /RGB
ID \xff\x00\x00\x00\xff\x00\x00\xff\x00\x00\x00\xff
EI Q
"""
pdf.pages[0].Contents = pdf.make_stream(stream)
cs = parse_content_stream(pdf.pages[0])
assert unparse_content_stream(cs).split() == stream.split()
class TestMalformedContentStreamInstructions:
def test_rejects_not_list_of_pairs(self):
with pytest.raises(PdfParsingError):
unparse_content_stream([(1, 2, 3)])
def test_rejects_not_castable_to_object(self):
with pytest.raises(PdfParsingError, match="While unparsing"):
unparse_content_stream([(['one', 'two'], 42)]) # 42 is not an operator
def test_rejects_not_operator(self):
with pytest.raises(PdfParsingError, match="While unparsing"):
unparse_content_stream(
[(['one', 'two'], Name.FortyTwo)]
) # Name is not an operator
def test_rejects_inline_image_missing(self):
with pytest.raises(PdfParsingError):
unparse_content_stream(
[('should be a PdfInlineImage but is not', b'INLINE IMAGE')]
)
def test_accepts_all_lists(self):
unparse_content_stream([[[], b'Q']])
def test_accepts_all_tuples(self):
unparse_content_stream((((Name.Foo,), b'/Do'),))
|
mpl-2.0
|
319f8498404a91e7444e065c0a92dc71
| 28.713396
| 86
| 0.602852
| 3.600604
| false
| true
| false
| false
|
pikepdf/pikepdf
|
src/pikepdf/models/_transcoding.py
|
1
|
8054
|
# SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
from __future__ import annotations
import struct
from typing import Any, Callable, NamedTuple, Union
from PIL import Image
from PIL.TiffTags import TAGS_V2 as TIFF_TAGS
BytesLike = Union[bytes, memoryview]
MutableBytesLike = Union[bytearray, memoryview]
def _next_multiple(n: int, k: int) -> int:
"""Return the multiple of k that is greater than or equal n.
>>> _next_multiple(101, 4)
104
>>> _next_multiple(100, 4)
100
"""
div, mod = divmod(n, k)
if mod > 0:
div += 1
return div * k
def unpack_subbyte_pixels(
packed: BytesLike, size: tuple[int, int], bits: int, scale: int = 0
) -> tuple[BytesLike, int]:
"""Unpack subbyte *bits* pixels into full bytes and rescale.
When scale is 0, the appropriate scale is calculated.
e.g. for 2-bit, the scale is adjusted so that
0b00 = 0.00 = 0x00
0b01 = 0.33 = 0x55
0b10 = 0.66 = 0xaa
0b11 = 1.00 = 0xff
    When scale is 1, no scaling is applied, which is appropriate when
the bytes are palette indexes.
"""
width, height = size
bits_per_byte = 8 // bits
stride = _next_multiple(width, bits_per_byte)
buffer = bytearray(bits_per_byte * stride * height)
max_read = len(buffer) // bits_per_byte
if scale == 0:
scale = 255 / ((2**bits) - 1)
if bits == 4:
_4bit_inner_loop(packed[:max_read], buffer, scale)
elif bits == 2:
_2bit_inner_loop(packed[:max_read], buffer, scale)
# elif bits == 1:
# _1bit_inner_loop(packed[:max_read], buffer, scale)
else:
raise NotImplementedError(bits)
return memoryview(buffer), stride
# def _1bit_inner_loop(in_: BytesLike, out: MutableBytesLike, scale: int) -> None:
# """Unpack 1-bit values to their 8-bit equivalents.
# Thus *out* must be 8x as long as *in*.
# """
# for n, val in enumerate(in_):
# out[8 * n + 0] = int((val >> 7) & 0b1) * scale
# out[8 * n + 1] = int((val >> 6) & 0b1) * scale
# out[8 * n + 2] = int((val >> 5) & 0b1) * scale
# out[8 * n + 3] = int((val >> 4) & 0b1) * scale
# out[8 * n + 4] = int((val >> 3) & 0b1) * scale
# out[8 * n + 5] = int((val >> 2) & 0b1) * scale
# out[8 * n + 6] = int((val >> 1) & 0b1) * scale
# out[8 * n + 7] = int((val >> 0) & 0b1) * scale
def _2bit_inner_loop(in_: BytesLike, out: MutableBytesLike, scale: int) -> None:
"""Unpack 2-bit values to their 8-bit equivalents.
    Thus *out* must be 4x as long as *in*.
"""
for n, val in enumerate(in_):
out[4 * n] = int((val >> 6) * scale)
out[4 * n + 1] = int(((val >> 4) & 0b11) * scale)
out[4 * n + 2] = int(((val >> 2) & 0b11) * scale)
out[4 * n + 3] = int((val & 0b11) * scale)
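# Illustrative note (not part of the original module): with the default 2-bit
# scale of 255 / 3 == 85, a packed byte 0b00011011 expands to the four samples
# [0, 85, 170, 255], i.e. 0b00, 0b01, 0b10, 0b11 rescaled to the full 8-bit range.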
def _4bit_inner_loop(in_: BytesLike, out: MutableBytesLike, scale: int) -> None:
"""Unpack 4-bit values to their 8-bit equivalents.
    Thus *out* must be 2x as long as *in*.
"""
for n, val in enumerate(in_):
out[2 * n] = int((val >> 4) * scale)
out[2 * n + 1] = int((val & 0b1111) * scale)
def image_from_byte_buffer(buffer: BytesLike, size: tuple[int, int], stride: int):
"""Use Pillow to create one-component image from a byte buffer.
*stride* is the number of bytes per row, and is essential for packed bits
with odd image widths.
"""
ystep = 1 # image is top to bottom in memory
return Image.frombuffer('L', size, buffer, "raw", 'L', stride, ystep)
def _make_rgb_palette(gray_palette: bytes) -> bytes:
palette = b''
for entry in gray_palette:
palette += bytes([entry]) * 3
return palette
def _depalettize_cmyk(buffer: BytesLike, palette: BytesLike):
with memoryview(buffer) as mv:
output = bytearray(4 * len(mv))
for n, pal_idx in enumerate(mv):
output[4 * n : 4 * (n + 1)] = palette[4 * pal_idx : 4 * (pal_idx + 1)]
return output
def image_from_buffer_and_palette(
buffer: BytesLike,
size: tuple[int, int],
stride: int,
base_mode: str,
palette: BytesLike,
) -> Image.Image:
"""Construct an image from a byte buffer and apply the palette.
1/2/4-bit images must be unpacked (no scaling!) to byte buffers first, such
that every 8-bit integer is an index into the palette.
"""
# Reminder Pillow palette byte order unintentionally changed in 8.3.0
# https://github.com/python-pillow/Pillow/issues/5595
# 8.2.0: all aligned by channel (very nonstandard)
# 8.3.0: all channels for one color followed by the next color (e.g. RGBRGBRGB)
if base_mode == 'RGB':
im = image_from_byte_buffer(buffer, size, stride)
im.putpalette(palette, rawmode=base_mode)
elif base_mode == 'L':
# Pillow does not fully support palettes with rawmode='L'.
# Convert to RGB palette.
gray_palette = _make_rgb_palette(palette)
im = image_from_byte_buffer(buffer, size, stride)
im.putpalette(gray_palette, rawmode='RGB')
elif base_mode == 'CMYK':
# Pillow does not support CMYK with palettes; convert manually
output = _depalettize_cmyk(buffer, palette)
im = Image.frombuffer('CMYK', size, data=output, decoder_name='raw')
else:
raise NotImplementedError(f'palette with {base_mode}')
return im
def fix_1bit_palette_image(
im: Image.Image, base_mode: str, palette: BytesLike
) -> Image.Image:
"""Apply palettes to 1-bit images."""
im = im.convert('P')
if base_mode == 'RGB' and len(palette) == 6:
# rgbrgb -> rgb000000...rgb
palette = palette[0:3] + (b'\x00\x00\x00' * (256 - 2)) + palette[3:6]
im.putpalette(palette, rawmode='RGB')
elif base_mode == 'L':
try:
im.putpalette(palette, rawmode='L')
except ValueError as e:
if 'unrecognized raw mode' in str(e):
rgb_palette = _make_rgb_palette(palette)
im.putpalette(rgb_palette, rawmode='RGB')
return im
def generate_ccitt_header(
size: tuple[int, int],
data_length: int,
ccitt_group: int,
photometry: int,
icc: bytes,
) -> bytes:
"""Generate binary CCITT header for image with given parameters."""
tiff_header_struct = '<' + '2s' + 'H' + 'L' + 'H'
tag_keys = {tag.name: key for key, tag in TIFF_TAGS.items()} # type: ignore
ifd_struct = '<HHLL'
class IFD(NamedTuple):
key: int
typecode: Any
count_: int
data: int | Callable[[], int | None]
ifds: list[IFD] = []
def header_length(ifd_count) -> int:
return (
struct.calcsize(tiff_header_struct)
+ struct.calcsize(ifd_struct) * ifd_count
+ 4
)
def add_ifd(tag_name: str, data: int | Callable[[], int | None], count: int = 1):
key = tag_keys[tag_name]
typecode = TIFF_TAGS[key].type # type: ignore
ifds.append(IFD(key, typecode, count, data))
image_offset = None
width, height = size
add_ifd('ImageWidth', width)
add_ifd('ImageLength', height)
add_ifd('BitsPerSample', 1)
add_ifd('Compression', ccitt_group)
add_ifd('PhotometricInterpretation', int(photometry))
add_ifd('StripOffsets', lambda: image_offset)
add_ifd('RowsPerStrip', height)
add_ifd('StripByteCounts', data_length)
icc_offset = 0
if icc:
add_ifd('ICCProfile', lambda: icc_offset, count=len(icc))
icc_offset = header_length(len(ifds))
image_offset = icc_offset + len(icc)
ifd_args = [(arg() if callable(arg) else arg) for ifd in ifds for arg in ifd]
tiff_header = struct.pack(
(tiff_header_struct + ifd_struct[1:] * len(ifds) + 'L'),
b'II', # Byte order indication: Little endian
42, # Version number (always 42)
8, # Offset to first IFD
len(ifds), # Number of tags in IFD
*ifd_args,
0, # Last IFD
)
if icc:
tiff_header += icc
return tiff_header
|
mpl-2.0
|
b25111f680b52bf6eef2e4dbd67af7ef
| 32.144033
| 85
| 0.591383
| 3.15472
| false
| false
| false
| false
|
marl/jams
|
jams/sonify.py
|
1
|
6973
|
#!/usr/bin/env python
# CREATED:2015-12-12 18:20:37 by Brian McFee <brian.mcfee@nyu.edu>
r'''
Sonification
------------
.. autosummary::
:toctree: generated/
sonify
'''
from itertools import product
from collections import OrderedDict, defaultdict
import six
import numpy as np
import mir_eval.sonify
from mir_eval.util import filter_kwargs
from .eval import coerce_annotation, hierarchy_flatten
from .exceptions import NamespaceError
__all__ = ['sonify']
def mkclick(freq, sr=22050, duration=0.1):
'''Generate a click sample.
This replicates functionality from mir_eval.sonify.clicks,
but exposes the target frequency and duration.
'''
times = np.arange(int(sr * duration))
click = np.sin(2 * np.pi * times * freq / float(sr))
click *= np.exp(- times / (1e-2 * sr))
return click
def clicks(annotation, sr=22050, length=None, **kwargs):
'''Sonify events with clicks.
This uses mir_eval.sonify.clicks, and is appropriate for instantaneous
events such as beats or segment boundaries.
'''
interval, _ = annotation.to_interval_values()
return filter_kwargs(mir_eval.sonify.clicks, interval[:, 0],
fs=sr, length=length, **kwargs)
def downbeat(annotation, sr=22050, length=None, **kwargs):
'''Sonify beats and downbeats together.
'''
beat_click = mkclick(440 * 2, sr=sr)
downbeat_click = mkclick(440 * 3, sr=sr)
intervals, values = annotation.to_interval_values()
beats, downbeats = [], []
for time, value in zip(intervals[:, 0], values):
if value['position'] == 1:
downbeats.append(time)
else:
beats.append(time)
if length is None:
length = int(sr * np.max(intervals)) + len(beat_click) + 1
y = filter_kwargs(mir_eval.sonify.clicks,
np.asarray(beats),
fs=sr, length=length, click=beat_click)
y += filter_kwargs(mir_eval.sonify.clicks,
np.asarray(downbeats),
fs=sr, length=length, click=downbeat_click)
return y
def multi_segment(annotation, sr=22050, length=None, **kwargs):
'''Sonify multi-level segmentations'''
# Pentatonic scale, because why not
PENT = [1, 32./27, 4./3, 3./2, 16./9]
DURATION = 0.1
h_int, _ = hierarchy_flatten(annotation)
if length is None:
length = int(sr * (max(np.max(_) for _ in h_int) + 1. / DURATION) + 1)
y = 0.0
for ints, (oc, scale) in zip(h_int, product(range(3, 3 + len(h_int)),
PENT)):
click = mkclick(440.0 * scale * oc, sr=sr, duration=DURATION)
y = y + filter_kwargs(mir_eval.sonify.clicks,
np.unique(ints),
fs=sr, length=length,
click=click)
return y
def chord(annotation, sr=22050, length=None, **kwargs):
'''Sonify chords
This uses mir_eval.sonify.chords.
'''
intervals, chords = annotation.to_interval_values()
return filter_kwargs(mir_eval.sonify.chords,
chords, intervals,
fs=sr, length=length,
**kwargs)
def pitch_contour(annotation, sr=22050, length=None, **kwargs):
'''Sonify pitch contours.
This uses mir_eval.sonify.pitch_contour, and should only be applied
to pitch annotations using the pitch_contour namespace.
Each contour is sonified independently, and the resulting waveforms
are summed together.
'''
# Map contours to lists of observations
times = defaultdict(list)
freqs = defaultdict(list)
for obs in annotation:
times[obs.value['index']].append(obs.time)
freqs[obs.value['index']].append(obs.value['frequency'] *
(-1)**(~obs.value['voiced']))
y_out = 0.0
for ix in times:
y_out = y_out + filter_kwargs(mir_eval.sonify.pitch_contour,
np.asarray(times[ix]),
np.asarray(freqs[ix]),
fs=sr, length=length,
**kwargs)
if length is None:
length = len(y_out)
return y_out
def piano_roll(annotation, sr=22050, length=None, **kwargs):
'''Sonify a piano-roll
This uses mir_eval.sonify.time_frequency, and is appropriate
for sparse transcription data, e.g., annotations in the `note_midi`
namespace.
'''
intervals, pitches = annotation.to_interval_values()
# Construct the pitchogram
pitch_map = {f: idx for idx, f in enumerate(np.unique(pitches))}
gram = np.zeros((len(pitch_map), len(intervals)))
for col, f in enumerate(pitches):
gram[pitch_map[f], col] = 1
return filter_kwargs(mir_eval.sonify.time_frequency,
gram, pitches, intervals,
sr, length=length, **kwargs)
SONIFY_MAPPING = OrderedDict()
SONIFY_MAPPING['beat_position'] = downbeat
SONIFY_MAPPING['beat'] = clicks
SONIFY_MAPPING['multi_segment'] = multi_segment
SONIFY_MAPPING['segment_open'] = clicks
SONIFY_MAPPING['onset'] = clicks
SONIFY_MAPPING['chord'] = chord
SONIFY_MAPPING['note_hz'] = piano_roll
SONIFY_MAPPING['pitch_contour'] = pitch_contour
def sonify(annotation, sr=22050, duration=None, **kwargs):
'''Sonify a jams annotation through mir_eval
Parameters
----------
annotation : jams.Annotation
The annotation to sonify
    sr : positive number
The sampling rate of the output waveform
duration : float (optional)
Optional length (in seconds) of the output waveform
kwargs
Additional keyword arguments to mir_eval.sonify functions
Returns
-------
y_sonified : np.ndarray
The waveform of the sonified annotation
Raises
------
NamespaceError
If the annotation has an un-sonifiable namespace
'''
length = None
if duration is None:
duration = annotation.duration
if duration is not None:
length = int(duration * sr)
# If the annotation can be directly sonified, try that first
if annotation.namespace in SONIFY_MAPPING:
ann = coerce_annotation(annotation, annotation.namespace)
return SONIFY_MAPPING[annotation.namespace](ann,
sr=sr,
length=length,
**kwargs)
for namespace, func in six.iteritems(SONIFY_MAPPING):
try:
ann = coerce_annotation(annotation, namespace)
return func(ann, sr=sr, length=length, **kwargs)
except NamespaceError:
pass
raise NamespaceError('Unable to sonify annotation of namespace="{:s}"'
.format(annotation.namespace))
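# Illustrative usage sketch (not part of the original module); 'example.jams' is a
# placeholder for any JAMS file that contains a beat annotation:
#     import jams
#     jam = jams.load('example.jams')
#     ann = jam.annotations.search(namespace='beat')[0]
#     y = sonify(ann, sr=22050, duration=30.0)   # click track as an np.ndarray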
|
isc
|
898e4ac4ddaea3ed6558fd91d972f3f3
| 28.175732
| 78
| 0.588986
| 3.837644
| false
| false
| false
| false
|
marl/jams
|
docs/examples/example_beat.py
|
1
|
1897
|
#!/usr/bin/env python
import librosa
import jams
def beat_track(infile, outfile):
# Load the audio file
y, sr = librosa.load(infile)
# Compute the track duration
track_duration = librosa.get_duration(y=y, sr=sr)
# Extract tempo and beat estimates
tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
# Convert beat frames to time
beat_times = librosa.frames_to_time(beat_frames, sr=sr)
# Construct a new JAMS object and annotation records
jam = jams.JAMS()
# Store the track duration
jam.file_metadata.duration = track_duration
beat_a = jams.Annotation(namespace='beat')
beat_a.annotation_metadata = jams.AnnotationMetadata(data_source='librosa beat tracker')
# Add beat timings to the annotation record.
# The beat namespace does not require value or confidence fields,
# so we can leave those blank.
for t in beat_times:
beat_a.append(time=t, duration=0.0)
# Store the new annotation in the jam
jam.annotations.append(beat_a)
# Add tempo estimation to the annotation.
tempo_a = jams.Annotation(namespace='tempo', time=0, duration=track_duration)
tempo_a.annotation_metadata = jams.AnnotationMetadata(data_source='librosa tempo estimator')
# The tempo estimate is global, so it should start at time=0 and cover the full
# track duration.
# If we had a likelihood score on the estimation, it could be stored in
# `confidence`. Since we have no competing estimates, we'll set it to 1.0.
tempo_a.append(time=0.0,
duration=track_duration,
value=tempo,
confidence=1.0)
# Store the new annotation in the jam
jam.annotations.append(tempo_a)
# Save to disk
jam.save(outfile)
if __name__ == '__main__':
infile = librosa.util.example_audio_file()
beat_track(infile, 'output.jams')
|
isc
|
11ad7906f0e204363942f215b9629c0a
| 29.596774
| 96
| 0.672114
| 3.449091
| false
| false
| false
| false
|
mail-in-a-box/mailinabox
|
tests/fail2ban.py
|
1
|
6372
|
# Test that a box's fail2ban settings are working
# correctly by attempting a bunch of failed logins.
#
# Specify a SSH login command (which we use to reset
# fail2ban after each test) and the hostname to
# try to log in to.
######################################################################
import sys, os, time, functools
# parse command line
if len(sys.argv) != 4:
print("Usage: tests/fail2ban.py \"ssh user@hostname\" hostname owncloud_user")
sys.exit(1)
ssh_command, hostname, owncloud_user = sys.argv[1:4]
# define some test types
import socket
socket.setdefaulttimeout(10)
class IsBlocked(Exception):
"""Tests raise this exception when it appears that a fail2ban
jail is in effect, i.e. on a connection refused error."""
pass
def smtp_test():
import smtplib
try:
server = smtplib.SMTP(hostname, 587)
except ConnectionRefusedError:
# looks like fail2ban worked
raise IsBlocked()
server.starttls()
server.ehlo_or_helo_if_needed()
try:
server.login("fakeuser", "fakepassword")
raise Exception("authentication didn't fail")
except smtplib.SMTPAuthenticationError:
		# authentication should fail
pass
try:
server.quit()
except:
# ignore errors here
pass
def imap_test():
import imaplib
try:
M = imaplib.IMAP4_SSL(hostname)
except ConnectionRefusedError:
# looks like fail2ban worked
raise IsBlocked()
try:
M.login("fakeuser", "fakepassword")
raise Exception("authentication didn't fail")
except imaplib.IMAP4.error:
# authentication should fail
pass
finally:
M.logout() # shuts down connection, has nothing to do with login()
def pop_test():
import poplib
try:
M = poplib.POP3_SSL(hostname)
except ConnectionRefusedError:
# looks like fail2ban worked
raise IsBlocked()
try:
M.user('fakeuser')
try:
M.pass_('fakepassword')
except poplib.error_proto as e:
# Authentication should fail.
M = None # don't .quit()
return
M.list()
raise Exception("authentication didn't fail")
finally:
if M:
M.quit()
def managesieve_test():
# We don't have a Python sieve client, so we'll
# just run the IMAP client and see what happens.
import imaplib
try:
M = imaplib.IMAP4(hostname, 4190)
except ConnectionRefusedError:
# looks like fail2ban worked
raise IsBlocked()
try:
M.login("fakeuser", "fakepassword")
raise Exception("authentication didn't fail")
except imaplib.IMAP4.error:
# authentication should fail
pass
finally:
M.logout() # shuts down connection, has nothing to do with login()
def http_test(url, expected_status, postdata=None, qsargs=None, auth=None):
import urllib.parse
import requests
from requests.auth import HTTPBasicAuth
# form request
url = urllib.parse.urljoin("https://" + hostname, url)
if qsargs: url += "?" + urllib.parse.urlencode(qsargs)
urlopen = requests.get if not postdata else requests.post
try:
# issue request
r = urlopen(
url,
auth=HTTPBasicAuth(*auth) if auth else None,
data=postdata,
headers={'User-Agent': 'Mail-in-a-Box fail2ban tester'},
timeout=8,
verify=False) # don't bother with HTTPS validation, it may not be configured yet
except requests.exceptions.ConnectTimeout as e:
raise IsBlocked()
except requests.exceptions.ConnectionError as e:
if "Connection refused" in str(e):
raise IsBlocked()
raise # some other unexpected condition
	# check the response status code
if r.status_code != expected_status:
r.raise_for_status() # anything but 200
raise IOError("Got unexpected status code %s." % r.status_code)
# define how to run a test
def restart_fail2ban_service(final=False):
# Log in over SSH to restart fail2ban.
command = "sudo fail2ban-client reload"
if not final:
# Stop recidive jails during testing.
command += " && sudo fail2ban-client stop recidive"
os.system("%s \"%s\"" % (ssh_command, command))
def testfunc_runner(i, testfunc, *args):
print(i+1, end=" ", flush=True)
testfunc(*args)
def run_test(testfunc, args, count, within_seconds, parallel):
# Run testfunc count times in within_seconds seconds (and actually
# within a little less time so we're sure we're under the limit).
#
# Because some services are slow, like IMAP, we can't necessarily
# run testfunc sequentially and still get to count requests within
# the required time. So we split the requests across threads.
import requests.exceptions
from multiprocessing import Pool
restart_fail2ban_service()
# Log.
print(testfunc.__name__, " ".join(str(a) for a in args), "...")
# Record the start time so we can know how to evenly space our
# calls to testfunc.
start_time = time.time()
with Pool(parallel) as p:
# Distribute the requests across the pool.
asyncresults = []
for i in range(count):
ar = p.apply_async(testfunc_runner, [i, testfunc] + list(args))
asyncresults.append(ar)
# Wait for all runs to finish.
p.close()
p.join()
# Check for errors.
for ar in asyncresults:
try:
ar.get()
except IsBlocked:
print("Test machine prematurely blocked!")
return False
# Did we make enough requests within the limit?
if (time.time()-start_time) > within_seconds:
raise Exception("Test failed to make %s requests in %d seconds." % (count, within_seconds))
# Wait a moment for the block to be put into place.
time.sleep(4)
# The next call should fail.
print("*", end=" ", flush=True)
try:
testfunc(*args)
except IsBlocked:
# Success -- this one is supposed to be refused.
print("blocked [OK]")
return True # OK
print("not blocked!")
return False
######################################################################
if __name__ == "__main__":
# run tests
# SMTP bans at 10 even though we say 20 in the config because we get
	# doubled-up warnings in the logs; we'll let that be for now
run_test(smtp_test, [], 10, 30, 8)
# IMAP
run_test(imap_test, [], 20, 30, 4)
# POP
run_test(pop_test, [], 20, 30, 4)
# Managesieve
run_test(managesieve_test, [], 20, 30, 4)
# Mail-in-a-Box control panel
run_test(http_test, ["/admin/login", 200], 20, 30, 1)
# Munin via the Mail-in-a-Box control panel
run_test(http_test, ["/admin/munin/", 401], 20, 30, 1)
# ownCloud
run_test(http_test, ["/cloud/remote.php/webdav", 401, None, None, [owncloud_user, "aa"]], 20, 120, 1)
# restart fail2ban so that this client machine is no longer blocked
restart_fail2ban_service(final=True)
|
cc0-1.0
|
29a186bced94e5b3029d8583694d7fcb
| 25.114754
| 102
| 0.689266
| 3.171727
| false
| true
| false
| false
|
mail-in-a-box/mailinabox
|
management/auth.py
|
1
|
6049
|
import base64, os, os.path, hmac, json, secrets
from datetime import timedelta
from expiringdict import ExpiringDict
import utils
from mailconfig import get_mail_password, get_mail_user_privileges
from mfa import get_hash_mfa_state, validate_auth_mfa
DEFAULT_KEY_PATH = '/var/lib/mailinabox/api.key'
DEFAULT_AUTH_REALM = 'Mail-in-a-Box Management Server'
class AuthService:
def __init__(self):
self.auth_realm = DEFAULT_AUTH_REALM
self.key_path = DEFAULT_KEY_PATH
self.max_session_duration = timedelta(days=2)
self.init_system_api_key()
self.sessions = ExpiringDict(max_len=64, max_age_seconds=self.max_session_duration.total_seconds())
def init_system_api_key(self):
"""Write an API key to a local file so local processes can use the API"""
with open(self.key_path, 'r') as file:
self.key = file.read()
def authenticate(self, request, env, login_only=False, logout=False):
"""Test if the HTTP Authorization header's username matches the system key, a session key,
or if the username/password passed in the header matches a local user.
Returns a tuple of the user's email address and list of user privileges (e.g.
('my@email', []) or ('my@email', ['admin']); raises a ValueError on login failure.
If the user used the system API key, the user's email is returned as None since
this key is not associated with a user."""
def parse_http_authorization_basic(header):
def decode(s):
return base64.b64decode(s.encode('ascii')).decode('ascii')
if " " not in header:
return None, None
scheme, credentials = header.split(maxsplit=1)
if scheme != 'Basic':
return None, None
credentials = decode(credentials)
if ":" not in credentials:
return None, None
username, password = credentials.split(':', maxsplit=1)
return username, password
username, password = parse_http_authorization_basic(request.headers.get('Authorization', ''))
if username in (None, ""):
raise ValueError("Authorization header invalid.")
if username.strip() == "" and password.strip() == "":
raise ValueError("No email address, password, session key, or API key provided.")
# If user passed the system API key, grant administrative privs. This key
# is not associated with a user.
if username == self.key and not login_only:
return (None, ["admin"])
# If the password corresponds with a session token for the user, grant access for that user.
if self.get_session(username, password, "login", env) and not login_only:
sessionid = password
session = self.sessions[sessionid]
if logout:
# Clear the session.
del self.sessions[sessionid]
else:
# Re-up the session so that it does not expire.
self.sessions[sessionid] = session
# If no password was given, but a username was given, we're missing some information.
elif password.strip() == "":
raise ValueError("Enter a password.")
else:
# The user is trying to log in with a username and a password
# (and possibly a MFA token). On failure, an exception is raised.
self.check_user_auth(username, password, request, env)
# Get privileges for authorization. This call should never fail because by this
# point we know the email address is a valid user --- unless the user has been
# deleted after the session was granted. On error the call will return a tuple
# of an error message and an HTTP status code.
privs = get_mail_user_privileges(username, env)
if isinstance(privs, tuple): raise ValueError(privs[0])
# Return the authorization information.
return (username, privs)
def check_user_auth(self, email, pw, request, env):
# Validate a user's login email address and password. If MFA is enabled,
# check the MFA token in the X-Auth-Token header.
#
# On login failure, raises a ValueError with a login error message. On
# success, nothing is returned.
# Authenticate.
try:
# Get the hashed password of the user. Raise a ValueError if the
# email address does not correspond to a user. But wrap it in the
# same exception as if a password fails so we don't easily reveal
# if an email address is valid.
pw_hash = get_mail_password(email, env)
# Use 'doveadm pw' to check credentials. doveadm will return
# a non-zero exit status if the credentials are no good,
# and check_call will raise an exception in that case.
utils.shell('check_call', [
"/usr/bin/doveadm", "pw",
"-p", pw,
"-t", pw_hash,
])
except:
# Login failed.
raise ValueError("Incorrect email address or password.")
# If MFA is enabled, check that MFA passes.
status, hints = validate_auth_mfa(email, request, env)
if not status:
			# MFA check failed. Hints may have more info.
raise ValueError(",".join(hints))
def create_user_password_state_token(self, email, env):
# Create a token that changes if the user's password or MFA options change
# so that sessions become invalid if any of that information changes.
msg = get_mail_password(email, env).encode("utf8")
# Add to the message the current MFA state, which is a list of MFA information.
# Turn it into a string stably.
msg += b" " + json.dumps(get_hash_mfa_state(email, env), sort_keys=True).encode("utf8")
# Make a HMAC using the system API key as a hash key.
hash_key = self.key.encode('ascii')
return hmac.new(hash_key, msg, digestmod="sha256").hexdigest()
def create_session_key(self, username, env, type=None):
# Create a new session.
token = secrets.token_hex(32)
self.sessions[token] = {
"email": username,
"password_token": self.create_user_password_state_token(username, env),
"type": type,
}
return token
def get_session(self, user_email, session_key, session_type, env):
if session_key not in self.sessions: return None
session = self.sessions[session_key]
if session_type == "login" and session["email"] != user_email: return None
if session["type"] != session_type: return None
if session["password_token"] != self.create_user_password_state_token(session["email"], env): return None
return session
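# Illustrative usage sketch (not part of the original module). The management
# daemon typically constructs one AuthService and authenticates each request;
# `request` (a Flask request) and `env` (the Mail-in-a-Box environment dict)
# are assumptions about the caller:
#     auth_service = AuthService()
#     email, privs = auth_service.authenticate(request, env)   # raises ValueError on failure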
|
cc0-1.0
|
c91b208667972834dae219583c0e7f27
| 38.279221
| 107
| 0.708712
| 3.462507
| false
| false
| false
| false
|
mcedit/pymclevel
|
minecraft_server.py
|
3
|
20215
|
import atexit
import itertools
import logging
import os
from os.path import dirname, join, basename
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import urllib
import infiniteworld
from mclevelbase import appSupportDir, exhaust, ChunkNotPresent
log = logging.getLogger(__name__)
__author__ = 'Rio'
# Thank you, Stackoverflow
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
def is_exe(f):
return os.path.exists(f) and os.access(f, os.X_OK)
fpath, _fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
if sys.platform == "win32":
if "SYSTEMROOT" in os.environ:
root = os.environ["SYSTEMROOT"]
exe_file = os.path.join(root, program)
if is_exe(exe_file):
return exe_file
if "PATH" in os.environ:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
l.sort(key=alphanum_key)
class ServerJarStorage(object):
defaultCacheDir = os.path.join(appSupportDir, u"ServerJarStorage")
def __init__(self, cacheDir=None):
if cacheDir is None:
cacheDir = self.defaultCacheDir
self.cacheDir = cacheDir
if not os.path.exists(self.cacheDir):
os.makedirs(self.cacheDir)
readme = os.path.join(self.cacheDir, "README.TXT")
if not os.path.exists(readme):
with file(readme, "w") as f:
f.write("""
About this folder:
This folder is used by MCEdit and pymclevel to store different versions of the
Minecraft Server to use for terrain generation. It should have one or more
subfolders, one for each version of the server. Each subfolder must hold at
least one file named minecraft_server.jar, and the subfolder's name should
have the server's version plus the names of any installed mods.
There may already be a subfolder here (for example, "Beta 1.7.3") if you have
used the Chunk Create feature in MCEdit to create chunks using the server.
Version numbers can be automatically detected. If you place one or more
minecraft_server.jar files in this folder, they will be placed automatically
into well-named subfolders the next time you run MCEdit. If a file's name
begins with "minecraft_server" and ends with ".jar", it will be detected in
this way.
""")
self.reloadVersions()
def reloadVersions(self):
cacheDirList = os.listdir(self.cacheDir)
self.versions = list(reversed(sorted([v for v in cacheDirList if os.path.exists(self.jarfileForVersion(v))], key=alphanum_key)))
if MCServerChunkGenerator.javaExe:
for f in cacheDirList:
p = os.path.join(self.cacheDir, f)
if f.startswith("minecraft_server") and f.endswith(".jar") and os.path.isfile(p):
print "Unclassified minecraft_server.jar found in cache dir. Discovering version number..."
self.cacheNewVersion(p)
os.remove(p)
print "Minecraft_Server.jar storage initialized."
print u"Each server is stored in a subdirectory of {0} named with the server's version number".format(self.cacheDir)
print "Cached servers: ", self.versions
def downloadCurrentServer(self):
print "Downloading the latest Minecraft Server..."
try:
(filename, headers) = urllib.urlretrieve("http://www.minecraft.net/download/minecraft_server.jar")
except Exception, e:
print "Error downloading server: {0!r}".format(e)
return
self.cacheNewVersion(filename, allowDuplicate=False)
def cacheNewVersion(self, filename, allowDuplicate=True):
""" Finds the version number from the server jar at filename and copies
it into the proper subfolder of the server jar cache folder"""
version = MCServerChunkGenerator._serverVersionFromJarFile(filename)
print "Found version ", version
versionDir = os.path.join(self.cacheDir, version)
i = 1
newVersionDir = versionDir
while os.path.exists(newVersionDir):
if not allowDuplicate:
return
newVersionDir = versionDir + " (" + str(i) + ")"
i += 1
os.mkdir(newVersionDir)
shutil.copy2(filename, os.path.join(newVersionDir, "minecraft_server.jar"))
if version not in self.versions:
self.versions.append(version)
def jarfileForVersion(self, v):
return os.path.join(self.cacheDir, v, "minecraft_server.jar").encode(sys.getfilesystemencoding())
def checksumForVersion(self, v):
jf = self.jarfileForVersion(v)
with file(jf, "rb") as f:
import hashlib
return hashlib.md5(f.read()).hexdigest()
broken_versions = ["Beta 1.9 Prerelease {0}".format(i) for i in (1, 2, 3)]
@property
def latestVersion(self):
if len(self.versions) == 0:
return None
return max((v for v in self.versions if v not in self.broken_versions), key=alphanum_key)
def getJarfile(self, version=None):
if len(self.versions) == 0:
print "No servers found in cache."
self.downloadCurrentServer()
version = version or self.latestVersion
if version not in self.versions:
return None
return self.jarfileForVersion(version)
class JavaNotFound(RuntimeError):
pass
class VersionNotFound(RuntimeError):
pass
def readProperties(filename):
if not os.path.exists(filename):
return {}
with file(filename) as f:
        properties = dict((line.split("=", 1) for line in (l.strip() for l in f) if not line.startswith("#")))  # maxsplit=1 so values containing "=" stay intact
return properties
def saveProperties(filename, properties):
with file(filename, "w") as f:
for k, v in properties.iteritems():
f.write("{0}={1}\n".format(k, v))
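# Illustrative note (not from the original source): readProperties/saveProperties
# round-trip the flat "key=value" lines of server.properties, e.g. (example values):
#
#     level-name=world
#     server-port=25565
#     allow-nether=true
#
# Comment lines starting with "#" are skipped on read and are not preserved on write.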
def findJava():
if sys.platform == "win32":
javaExe = which("java.exe")
if javaExe is None:
            KEY_NAME = r"HKLM\SOFTWARE\JavaSoft\Java Runtime Environment"
try:
p = subprocess.Popen(["REG", "QUERY", KEY_NAME, "/v", "CurrentVersion"], stdout=subprocess.PIPE, universal_newlines=True)
o, e = p.communicate()
lines = o.split("\n")
for l in lines:
l = l.strip()
if l.startswith("CurrentVersion"):
words = l.split(None, 2)
version = words[-1]
p = subprocess.Popen(["REG", "QUERY", KEY_NAME + "\\" + version, "/v", "JavaHome"], stdout=subprocess.PIPE, universal_newlines=True)
o, e = p.communicate()
lines = o.split("\n")
for l in lines:
l = l.strip()
if l.startswith("JavaHome"):
w = l.split(None, 2)
javaHome = w[-1]
javaExe = os.path.join(javaHome, "bin", "java.exe")
print "RegQuery: java.exe found at ", javaExe
break
except Exception, e:
print "Error while locating java.exe using the Registry: ", repr(e)
else:
javaExe = which("java")
return javaExe
class MCServerChunkGenerator(object):
"""Generates chunks using minecraft_server.jar. Uses a ServerJarStorage to
store different versions of minecraft_server.jar in an application support
folder.
    Example usage:
    from pymclevel import *
gen = MCServerChunkGenerator() # with no arguments, use the newest
# server version in the cache, or download
# the newest one automatically
level = loadWorldNamed("MyWorld")
gen.generateChunkInLevel(level, 12, 24)
Using an older version:
gen = MCServerChunkGenerator("Beta 1.6.5")
"""
defaultJarStorage = None
javaExe = findJava()
jarStorage = None
tempWorldCache = {}
def __init__(self, version=None, jarfile=None, jarStorage=None):
self.jarStorage = jarStorage or self.getDefaultJarStorage()
if self.javaExe is None:
raise JavaNotFound("Could not find java. Please check that java is installed correctly. (Could not find java in your PATH environment variable.)")
if jarfile is None:
jarfile = self.jarStorage.getJarfile(version)
if jarfile is None:
raise VersionNotFound("Could not find minecraft_server.jar for version {0}. Please make sure that a minecraft_server.jar is placed under {1} in a subfolder named after the server's version number.".format(version or "(latest)", self.jarStorage.cacheDir))
self.serverJarFile = jarfile
self.serverVersion = version or self._serverVersion()
@classmethod
def getDefaultJarStorage(cls):
if cls.defaultJarStorage is None:
cls.defaultJarStorage = ServerJarStorage()
return cls.defaultJarStorage
@classmethod
def clearWorldCache(cls):
cls.tempWorldCache = {}
for tempDir in os.listdir(cls.worldCacheDir):
t = os.path.join(cls.worldCacheDir, tempDir)
if os.path.isdir(t):
shutil.rmtree(t)
def createReadme(self):
readme = os.path.join(self.worldCacheDir, "README.TXT")
if not os.path.exists(readme):
with file(readme, "w") as f:
f.write("""
About this folder:
This folder is used by MCEdit and pymclevel to cache levels during terrain
generation. Feel free to delete it for any reason.
""")
worldCacheDir = os.path.join(tempfile.gettempdir(), "pymclevel_MCServerChunkGenerator")
def tempWorldForLevel(self, level):
# tempDir = tempfile.mkdtemp("mclevel_servergen")
tempDir = os.path.join(self.worldCacheDir, self.jarStorage.checksumForVersion(self.serverVersion), str(level.RandomSeed))
propsFile = os.path.join(tempDir, "server.properties")
properties = readProperties(propsFile)
tempWorld = self.tempWorldCache.get((self.serverVersion, level.RandomSeed))
if tempWorld is None:
if not os.path.exists(tempDir):
os.makedirs(tempDir)
self.createReadme()
worldName = "world"
worldName = properties.setdefault("level-name", worldName)
tempWorldDir = os.path.join(tempDir, worldName)
tempWorld = infiniteworld.MCInfdevOldLevel(tempWorldDir, create=True, random_seed=level.RandomSeed)
tempWorld.close()
tempWorldRO = infiniteworld.MCInfdevOldLevel(tempWorldDir, readonly=True)
self.tempWorldCache[self.serverVersion, level.RandomSeed] = tempWorldRO
if level.dimNo == 0:
properties["allow-nether"] = "false"
else:
tempWorld = tempWorld.getDimension(level.dimNo)
properties["allow-nether"] = "true"
properties["server-port"] = int(32767 + random.random() * 32700)
saveProperties(propsFile, properties)
return tempWorld, tempDir
def generateAtPosition(self, tempWorld, tempDir, cx, cz):
return exhaust(self.generateAtPositionIter(tempWorld, tempDir, cx, cz))
def generateAtPositionIter(self, tempWorld, tempDir, cx, cz, simulate=False):
tempWorldRW = infiniteworld.MCInfdevOldLevel(tempWorld.filename)
tempWorldRW.setPlayerSpawnPosition((cx * 16, 64, cz * 16))
tempWorldRW.saveInPlace()
tempWorldRW.close()
del tempWorldRW
tempWorld.unload()
startTime = time.time()
proc = self.runServer(tempDir)
while proc.poll() is None:
line = proc.stdout.readline().strip()
log.info(line)
yield line
# Forge and FML change stderr output, causing MCServerChunkGenerator to wait endlessly.
#
# Vanilla:
# 2012-11-13 11:29:19 [INFO] Done (9.962s)!
#
# Forge/FML:
# 2012-11-13 11:47:13 [INFO] [Minecraft] Done (8.020s)!
if "INFO" in line and "Done" in line:
if simulate:
duration = time.time() - startTime
simSeconds = max(8, int(duration) + 1)
for i in range(simSeconds):
# process tile ticks
yield "%2d/%2d: Simulating the world for a little bit..." % (i, simSeconds)
time.sleep(1)
proc.stdin.write("stop\n")
proc.wait()
break
if "FAILED TO BIND" in line:
proc.kill()
proc.wait()
raise RuntimeError("Server failed to bind to port!")
stdout, _ = proc.communicate()
if "Could not reserve enough space" in stdout and not MCServerChunkGenerator.lowMemory:
MCServerChunkGenerator.lowMemory = True
for i in self.generateAtPositionIter(tempWorld, tempDir, cx, cz):
yield i
(tempWorld.parentWorld or tempWorld).loadLevelDat() # reload version number
def copyChunkAtPosition(self, tempWorld, level, cx, cz):
if level.containsChunk(cx, cz):
return
try:
tempChunkBytes = tempWorld._getChunkBytes(cx, cz)
except ChunkNotPresent, e:
raise ChunkNotPresent, "While generating a world in {0} using server {1} ({2!r})".format(tempWorld, self.serverJarFile, e), sys.exc_info()[2]
level.worldFolder.saveChunk(cx, cz, tempChunkBytes)
level._allChunks = None
def generateChunkInLevel(self, level, cx, cz):
assert isinstance(level, infiniteworld.MCInfdevOldLevel)
tempWorld, tempDir = self.tempWorldForLevel(level)
self.generateAtPosition(tempWorld, tempDir, cx, cz)
self.copyChunkAtPosition(tempWorld, level, cx, cz)
minRadius = 5
maxRadius = 20
def createLevel(self, level, box, simulate=False, **kw):
return exhaust(self.createLevelIter(level, box, simulate, **kw))
def createLevelIter(self, level, box, simulate=False, **kw):
if isinstance(level, basestring):
filename = level
level = infiniteworld.MCInfdevOldLevel(filename, create=True, **kw)
assert isinstance(level, infiniteworld.MCInfdevOldLevel)
minRadius = self.minRadius
genPositions = list(itertools.product(
xrange(box.mincx, box.maxcx, minRadius * 2),
xrange(box.mincz, box.maxcz, minRadius * 2)))
for i, (cx, cz) in enumerate(genPositions):
log.info("Generating at %s" % ((cx, cz),))
parentDir = dirname(os.path.abspath(level.worldFolder.filename))
propsFile = join(parentDir, "server.properties")
props = readProperties(join(dirname(self.serverJarFile), "server.properties"))
props["level-name"] = basename(level.worldFolder.filename)
props["server-port"] = int(32767 + random.random() * 32700)
saveProperties(propsFile, props)
for p in self.generateAtPositionIter(level, parentDir, cx, cz, simulate):
yield i, len(genPositions), p
level.close()
def generateChunksInLevel(self, level, chunks):
return exhaust(self.generateChunksInLevelIter(level, chunks))
def generateChunksInLevelIter(self, level, chunks, simulate=False):
tempWorld, tempDir = self.tempWorldForLevel(level)
startLength = len(chunks)
minRadius = self.minRadius
maxRadius = self.maxRadius
chunks = set(chunks)
while len(chunks):
length = len(chunks)
centercx, centercz = chunks.pop()
chunks.add((centercx, centercz))
# assume the generator always generates at least an 11x11 chunk square.
centercx += minRadius
centercz += minRadius
# boxedChunks = [cPos for cPos in chunks if inBox(cPos)]
print "Generating {0} chunks out of {1} starting from {2}".format("XXX", len(chunks), (centercx, centercz))
yield startLength - len(chunks), startLength
# chunks = [c for c in chunks if not inBox(c)]
for p in self.generateAtPositionIter(tempWorld, tempDir, centercx, centercz, simulate):
yield startLength - len(chunks), startLength, p
i = 0
for cx, cz in itertools.product(
xrange(centercx - maxRadius, centercx + maxRadius),
xrange(centercz - maxRadius, centercz + maxRadius)):
if level.containsChunk(cx, cz):
chunks.discard((cx, cz))
elif ((cx, cz) in chunks
and all(tempWorld.containsChunk(ncx, ncz) for ncx, ncz in itertools.product(xrange(cx-1, cx+2), xrange(cz-1, cz+2)))
):
self.copyChunkAtPosition(tempWorld, level, cx, cz)
i += 1
chunks.discard((cx, cz))
yield startLength - len(chunks), startLength
if length == len(chunks):
print "No chunks were generated. Aborting."
break
level.saveInPlace()
def runServer(self, startingDir):
if isinstance(startingDir, unicode):
startingDir = startingDir.encode(sys.getfilesystemencoding())
return self._runServer(startingDir, self.serverJarFile)
lowMemory = False
@classmethod
def _runServer(cls, startingDir, jarfile):
log.info("Starting server %s in %s", jarfile, startingDir)
if cls.lowMemory:
memflags = []
else:
memflags = ["-Xmx1024M", "-Xms1024M", ]
proc = subprocess.Popen([cls.javaExe, "-Djava.awt.headless=true"] + memflags + ["-jar", jarfile],
executable=cls.javaExe,
cwd=startingDir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
atexit.register(proc.terminate)
return proc
def _serverVersion(self):
return self._serverVersionFromJarFile(self.serverJarFile)
@classmethod
def _serverVersionFromJarFile(cls, jarfile):
tempdir = tempfile.mkdtemp("mclevel_servergen")
proc = cls._runServer(tempdir, jarfile)
version = "Unknown"
# out, err = proc.communicate()
# for line in err.split("\n"):
while proc.poll() is None:
line = proc.stdout.readline()
if "Preparing start region" in line:
break
if "Starting minecraft server version" in line:
version = line.split("Starting minecraft server version")[1].strip()
break
if proc.returncode is None:
try:
proc.kill()
except WindowsError:
pass # access denied, process already terminated
proc.wait()
shutil.rmtree(tempdir)
if ";)" in version:
version = version.replace(";)", "") # Damnit, Jeb!
# Versions like "0.2.1" are alphas, and versions like "1.0.0" without "Beta" are releases
if version[0] == "0":
version = "Alpha " + version
try:
if int(version[0]) > 0:
version = "Release " + version
except ValueError:
pass
return version
|
isc
|
d7d9be87202ff9fecb699a0206168e1e
| 35.227599
| 266
| 0.602622
| 4.065768
| false
| false
| false
| false
|
mcedit/pymclevel
|
block_fill.py
|
3
|
3454
|
import logging
import materials
log = logging.getLogger(__name__)
import numpy
from mclevelbase import exhaust
import blockrotation
from entity import TileEntity
def blockReplaceTable(blocksToReplace):
blocktable = numpy.zeros((materials.id_limit, 16), dtype='bool')
for b in blocksToReplace:
if b.hasVariants:
blocktable[b.ID, b.blockData] = True
else:
blocktable[b.ID] = True
return blocktable
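# Illustrative note (not from the original source): blockReplaceTable builds a
# (block ID, data value) -> bool lookup so a whole chunk can be tested at once
# with numpy fancy indexing, e.g. mask = blocktable[chunk.Blocks, chunk.Data],
# which is how fillBlocksIter uses it below.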
def fillBlocks(level, box, blockInfo, blocksToReplace=()):
return exhaust(level.fillBlocksIter(box, blockInfo, blocksToReplace))
def fillBlocksIter(level, box, blockInfo, blocksToReplace=()):
if box is None:
chunkIterator = level.getAllChunkSlices()
box = level.bounds
else:
chunkIterator = level.getChunkSlices(box)
# shouldRetainData = (not blockInfo.hasVariants and not any([b.hasVariants for b in blocksToReplace]))
# if shouldRetainData:
# log.info( "Preserving data bytes" )
shouldRetainData = False # xxx old behavior overwrote blockdata with 0 when e.g. replacing water with lava
log.info("Replacing {0} with {1}".format(blocksToReplace, blockInfo))
changesLighting = True
blocktable = None
if len(blocksToReplace):
blocktable = blockReplaceTable(blocksToReplace)
shouldRetainData = all([blockrotation.SameRotationType(blockInfo, b) for b in blocksToReplace])
newAbsorption = level.materials.lightAbsorption[blockInfo.ID]
oldAbsorptions = [level.materials.lightAbsorption[b.ID] for b in blocksToReplace]
changesLighting = False
for a in oldAbsorptions:
if a != newAbsorption:
changesLighting = True
newEmission = level.materials.lightEmission[blockInfo.ID]
oldEmissions = [level.materials.lightEmission[b.ID] for b in blocksToReplace]
for a in oldEmissions:
if a != newEmission:
changesLighting = True
i = 0
skipped = 0
replaced = 0
for (chunk, slices, point) in chunkIterator:
i += 1
if i % 100 == 0:
log.info(u"Chunk {0}...".format(i))
yield i, box.chunkCount
blocks = chunk.Blocks[slices]
data = chunk.Data[slices]
mask = slice(None)
needsLighting = changesLighting
if blocktable is not None:
mask = blocktable[blocks, data]
blockCount = mask.sum()
replaced += blockCount
# don't waste time relighting and copying if the mask is empty
if blockCount:
blocks[:][mask] = blockInfo.ID
if not shouldRetainData:
data[mask] = blockInfo.blockData
else:
skipped += 1
needsLighting = False
def include(tileEntity):
p = TileEntity.pos(tileEntity)
x, y, z = map(lambda a, b, c: (a - b) - c, p, point, box.origin)
return not ((p in box) and mask[x, z, y])
chunk.TileEntities[:] = filter(include, chunk.TileEntities)
else:
blocks[:] = blockInfo.ID
if not shouldRetainData:
data[:] = blockInfo.blockData
chunk.removeTileEntitiesInBox(box)
chunk.chunkChanged(needsLighting)
if len(blocksToReplace):
log.info(u"Replace: Skipped {0} chunks, replaced {1} blocks".format(skipped, replaced))
|
isc
|
b170273c83cfc015c2c0612b019dc011
| 31.895238
| 111
| 0.621309
| 3.933941
| false
| false
| false
| false
|
mcedit/mcedit
|
filters/CreateSpawners.py
|
1
|
1386
|
# Feel free to modify and use this filter however you wish. If you do,
# please give credit to SethBling.
# http://youtube.com/SethBling
from pymclevel import TAG_Compound
from pymclevel import TAG_Int
from pymclevel import TAG_Short
from pymclevel import TAG_Byte
from pymclevel import TAG_String
from pymclevel import TAG_Float
from pymclevel import TAG_Double
from pymclevel import TAG_List
from pymclevel import TileEntity
displayName = "Create Spawners"
inputs = (
("Include position data", False),
)
def perform(level, box, options):
includePos = options["Include position data"]
entitiesToRemove = []
for (chunk, slices, point) in level.getChunkSlices(box):
for entity in chunk.Entities:
x = int(entity["Pos"][0].value)
y = int(entity["Pos"][1].value)
z = int(entity["Pos"][2].value)
if x >= box.minx and x < box.maxx and y >= box.miny and y < box.maxy and z >= box.minz and z < box.maxz:
entitiesToRemove.append((chunk, entity))
level.setBlockAt(x, y, z, 52)
spawner = TileEntity.Create("MobSpawner")
TileEntity.setpos(spawner, (x, y, z))
spawner["Delay"] = TAG_Short(120)
spawner["SpawnData"] = entity
if not includePos:
del spawner["SpawnData"]["Pos"]
spawner["EntityId"] = entity["id"]
chunk.TileEntities.append(spawner)
for (chunk, entity) in entitiesToRemove:
chunk.Entities.remove(entity)
|
isc
|
38f5e4fd2ba5c948d9c00e5a731308f4
| 27.875
| 107
| 0.702742
| 3.08686
| false
| false
| false
| false
|
mcedit/mcedit
|
filters/surfacerepair.py
|
1
|
2001
|
from numpy import zeros, array
import itertools
# naturally occurring materials
from pymclevel.level import extractHeights
blocktypes = [1, 2, 3, 7, 12, 13, 14, 15, 16, 56, 73, 74, 87, 88, 89]
blockmask = zeros((256,), dtype='bool')
#compute a truth table that we can index to find out whether a block
# is naturally occurring and should be considered in a heightmap
blockmask[blocktypes] = True
displayName = "Chunk Surface Repair"
inputs = (
("Repairs the backwards surfaces made by old versions of Minecraft.", "label"),
)
def perform(level, box, options):
#iterate through the slices of each chunk in the selection box
for chunk, slices, point in level.getChunkSlices(box):
# slicing the block array is straightforward. blocks will contain only
# the area of interest in this chunk.
blocks = chunk.Blocks
data = chunk.Data
# use indexing to look up whether or not each block in blocks is
        # naturally-occurring. These blocks will "count" for column height.
maskedBlocks = blockmask[blocks]
heightmap = extractHeights(maskedBlocks)
for x in range(heightmap.shape[0]):
for z in range(x + 1, heightmap.shape[1]):
h = heightmap[x, z]
h2 = heightmap[z, x]
b2 = blocks[z, x, h2]
if blocks[x, z, h] == 1:
h += 2 # rock surface - top 4 layers become 2 air and 2 rock
if blocks[z, x, h2] == 1:
h2 += 2 # rock surface - top 4 layers become 2 air and 2 rock
# topsoil is 4 layers deep
def swap(s1, s2):
a2 = array(s2)
s2[:] = s1[:]
s1[:] = a2[:]
swap(blocks[x, z, h - 3:h + 1], blocks[z, x, h2 - 3:h2 + 1])
swap(data[x, z, h - 3:h + 1], data[z, x, h2 - 3:h2 + 1])
# remember to do this to make sure the chunk is saved
chunk.chunkChanged()
|
isc
|
37d1710d9987494cf6c9f35b78dc2383
| 32.35
| 82
| 0.574713
| 3.638182
| false
| false
| false
| false
|
josephmisiti/awesome-machine-learning
|
scripts/pull_R_packages.py
|
1
|
1150
|
#!/usr/bin/python
"""
This script will scrape the r-project.org machine learning selection and
format the packages in github markdown style for this
awesome-machine-learning repo.
"""
from pyquery import PyQuery as pq
import urllib
import codecs
import random
text_file = codecs.open("Packages.txt", encoding='utf-8', mode="w")
d = pq(url='http://cran.r-project.org/web/views/MachineLearning.html',
opener=lambda url, **kw: urllib.urlopen(url).read())
for e in d("li").items():
package_name = e("a").html()
package_link = e("a")[0].attrib['href']
if '..' in package_link:
package_link = package_link.replace("..",
'http://cran.r-project.org/web')
dd = pq(url=package_link, opener=lambda url,
**kw: urllib.urlopen(url).read())
package_description = dd("h2").html()
    text_file.write("* [%s](%s) - %s \n" % (package_name, package_link,
package_description))
# print("* [%s](%s) - %s" % (package_name,package_link,
# package_description))
|
cc0-1.0
|
9ccae99e7636264480a1f1c402562478
| 37.333333
| 76
| 0.565217
| 3.674121
| false
| false
| false
| false
|
mozilla-services/tecken
|
docs/exts/adr_log.py
|
1
|
5841
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""Directive for generating an ADR log from a directory of ADRs.
Usage::
.. adrlog:: PATH
.. adrlog:: PATH
:urlroot: https://github.com/mozilla-services/socorro/tree/main/docs/adr
Required parameters:
* PATH: the path relative to the docs/ directory to the ADR directory
Optional parameters:
* urlroot: the absolute url where the ADR files are located
"""
import dataclasses
import os
import os.path
from typing import Dict
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import ViewList
@dataclasses.dataclass(order=True)
class ADR:
adr_id: str
name: str
metadata: Dict[str, str]
def fetch_adr(filepath):
"""Parses an ADR at filepath and returns ADR
:param filepath: path to ADR file in Markdown format
:returns: ADR
"""
with open(filepath) as fp:
source = fp.read()
# NOTE(willkg): I didn't want to require a markdown parser, so this just looks at
# Socorro's ADR log structure which is a header followed by a list of meta
# information
adr_id = os.path.splitext(os.path.basename(filepath))[0]
name = ""
metadata = {}
STATE_DEFAULT, STATE_LIST = range(2)
state = STATE_DEFAULT
for line in source.splitlines():
line = line.rstrip()
if state == STATE_DEFAULT:
if not line:
continue
elif line.startswith("# "):
name = line[2:]
elif line.startswith("- "):
state = STATE_LIST
if ":" not in line:
continue
key, val = line.split(":", 1)
metadata[key[2:].strip()] = val.strip()
if state == STATE_LIST:
if not line:
# If we hit an empty line while parsing the first list, then we're done
# and we can stop parsing
break
if ":" not in line:
continue
key, val = line.split(":", 1)
metadata[key[2:].strip()] = val.strip()
return ADR(adr_id=adr_id, name=name, metadata=metadata)
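# Illustrative note (not from the original source): fetch_adr expects the
# "header followed by a list of meta information" layout described above. A
# hypothetical ADR file:
#
#     # Use PostgreSQL for crash storage
#
#     - Status: accepted
#     - Deciders: Jane Example
#     - Date: 2021-06-01
#
# The "# " heading becomes ADR.name and each "- Key: value" line becomes an
# entry in ADR.metadata.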
def fetch_adrs(filepath):
"""Given a filepath to an ADRs directory, returns the log
:param filepath: the filepath to ADR directory
:returns: list of ADRs
"""
adrs = []
for fn in os.listdir(filepath):
if not fn.endswith(".md"):
continue
if fn in ["index.md", "README.md", "template.md"]:
continue
fn = os.path.join(filepath, fn)
adrs.append(fetch_adr(fn))
return adrs
def build_table(table):
"""Generates reST for a table.
:param table: a 2d array of rows and columns
:returns: list of strings
"""
output = []
col_size = [0] * len(table[0])
for row in table:
for i, col in enumerate(row):
col_size[i] = max(col_size[i], len(col))
col_size = [width + 2 for width in col_size]
# Build header
output.append(" ".join("=" * width for width in col_size))
output.append(
" ".join(
header + (" " * (width - len(header)))
for header, width in zip(table[0], col_size)
)
)
output.append(" ".join("=" * width for width in col_size))
# Iterate through rows
for row in table[1:]:
output.append(
" ".join(
col + (" " * (width - len(col)))
for col, width in zip(row, col_size)
)
)
output.append(" ".join("=" * width for width in col_size))
return output
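# Illustrative note (not from the original source): build_table emits a reST
# "simple table"; for a 2x2 input it produces roughly
#
#     ======  ========
#     Date    ADR id
#     ======  ========
#     2021    0001
#     ======  ========
#
# with every column padded to its widest cell plus two characters.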
class ADRLogDirective(Directive):
"""Directive for showing an ADR log."""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
"urlroot": directives.unchanged_required,
}
def add_line(self, line, source, *lineno):
"""Add a line to the result"""
self.result.append(line, source, *lineno)
def generate_log(self, filepath, urlroot):
def linkify(adr_id, urlroot):
if urlroot:
return f"`{adr_id} <{urlroot}/{adr_id}.md>`_"
return adr_id
adrs = fetch_adrs(filepath)
adrs.sort(reverse=True) # key=lambda adr: adr.adr_id, reverse=True)
table = [["Date", "ADR id", "Status", "Name", "Deciders"]]
for adr in adrs:
table.append(
[
adr.metadata.get("Date", "Unknown"),
linkify(adr.adr_id, urlroot),
adr.metadata.get("Status", "Unknown"),
adr.name,
adr.metadata.get("Deciders", "Unknown"),
]
)
sourcename = "adrlog %s" % filepath
for line in build_table(table):
self.add_line(line, sourcename)
def run(self):
if "urlroot" in self.options:
urlroot = self.options["urlroot"]
else:
urlroot = ""
self.reporter = self.state.document.reporter
self.result = ViewList()
filepath = os.path.abspath(self.arguments[0]).rstrip("/")
self.generate_log(filepath, urlroot)
if not self.result:
return []
node = nodes.paragraph()
node.document = self.state.document
self.state.nested_parse(self.result, 0, node)
return node.children
def setup(app):
"""Register directive in Sphinx."""
app.add_directive("adrlog", ADRLogDirective)
return {
"version": "1.0",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
mpl-2.0
|
c60c712f5069b89643ca02e08dfecf35
| 24.96
| 87
| 0.563602
| 3.810176
| false
| false
| false
| false
|
mozilla-services/tecken
|
tecken/useradmin/management/commands/is-blocked-in-auth0.py
|
1
|
1430
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from urllib.parse import urlparse
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from tecken.librequests import session_with_retries
from tecken.useradmin.middleware import find_users
class Command(BaseCommand):
help = "Find out if a user is blocked in Auth0 on the command line"
def add_arguments(self, parser):
parser.add_argument("email")
def handle(self, *args, **options):
email = options["email"]
if " " in email or email.count("@") != 1:
raise CommandError(f"Invalid email {email!r}")
session = session_with_retries()
users = find_users(
settings.OIDC_RP_CLIENT_ID,
settings.OIDC_RP_CLIENT_SECRET,
urlparse(settings.OIDC_OP_USER_ENDPOINT).netloc,
email,
session,
)
for user in users:
if user.get("blocked"):
self.stdout.write(self.style.ERROR("BLOCKED!"))
else:
self.stdout.write(self.style.SUCCESS("NOT blocked!"))
break
else:
self.stdout.write(
self.style.WARNING(f"{email} could not be found in Auth0")
)
|
mpl-2.0
|
ab4c7e3a743e5ef908f91e09d8b77006
| 33.878049
| 74
| 0.61958
| 4.028169
| false
| false
| false
| false
|
mozilla-services/tecken
|
eliot-service/eliot/health_resource.py
|
1
|
1629
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""
Application-health related Falcon resources.
"""
import json
from dockerflow.version import get_version
import falcon
import markus
METRICS = markus.get_metrics(__name__)
class BrokenResource:
"""Handle ``/__broken__`` endpoint."""
def on_get(self, req, resp):
"""Implement GET HTTP request."""
METRICS.incr("broken.count")
# This is intentional breakage
raise Exception("intentional exception")
class VersionResource:
"""Handle ``/__version__`` endpoint."""
def __init__(self, basedir):
self.basedir = basedir
def on_get(self, req, resp):
"""Implement GET HTTP request."""
METRICS.incr("version.count")
resp.status = falcon.HTTP_200
resp.text = json.dumps(get_version(self.basedir) or {})
class LBHeartbeatResource:
"""Handle ``/__lbheartbeat__`` to let the load balancing know application health."""
def on_get(self, req, resp):
"""Implement GET HTTP request."""
METRICS.incr("lbheartbeat.count")
resp.content_type = "application/json; charset=utf-8"
resp.status = falcon.HTTP_200
class HeartbeatResource:
"""Handle ``/__heartbeat__`` for app health."""
def on_get(self, req, resp):
"""Implement GET HTTP request."""
METRICS.incr("heartbeat.count")
resp.content_type = "application/json; charset=utf-8"
resp.status = falcon.HTTP_200
|
mpl-2.0
|
58af94c9ce1027fba519ae0f992c7d3c
| 26.610169
| 88
| 0.642112
| 3.814988
| false
| false
| false
| false
|
mozilla-services/tecken
|
systemtests/bin/make-stacks.py
|
1
|
4833
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Fetches processed crash data for given crash ids and generates
# stacks for use with the Symbolication API. This has two modes:
#
# * print: prints the stack for a single crash id to stdout
# * save: saves one or more stacks for specified crash ids to the file
# system
#
# Usage: ./bin/make-stacks.py print [CRASHID]
#
# Usage: ./bin/make-stacks.py save [OUTPUTDIR] [CRASHID] [CRASHID...]
import json
import os
import sys
import click
import requests
PROCESSED_CRASH_API = "https://crash-stats.mozilla.org/api/ProcessedCrash"
def fetch_crash_report(crashid):
"""Fetch processed crash data from crash-stats
:param crashid: the crash id
:returns: processed crash as a dict
"""
headers = {"User-Agent": "tecken-systemtests"}
resp = requests.get(
PROCESSED_CRASH_API, params={"crash_id": crashid}, headers=headers
)
resp.raise_for_status()
return resp.json()
def build_stack(data):
"""Convert processed crash to a Symbolicate API payload
:param data: the processed crash as a dict
:returns: Symbolicate API payload
"""
json_dump = data.get("json_dump") or {}
if not json_dump:
return {}
crashing_thread = json_dump.get("crashing_thread") or {}
if not crashing_thread:
return {}
modules = []
modules_list = []
for module in json_dump.get("modules") or []:
debug_file = module.get("debug_file") or ""
debug_id = module.get("debug_id") or ""
# Add the module information to the map
modules.append((debug_file, debug_id))
# Keep track of which modules are at which index
modules_list.append(module.get("filename") or "unknown")
stack = []
for frame in crashing_thread.get("frames") or []:
if frame.get("module"):
module_index = modules_list.index(frame["module"])
else:
# -1 indicates the module is unknown
module_index = -1
if frame.get("module_offset"):
module_offset = int(frame["module_offset"], base=16)
else:
# -1 indicates the module_offset is unknown
module_offset = -1
stack.append((module_index, module_offset))
return {
"stacks": [stack],
"memoryMap": modules,
# NOTE(willkg): we mark this as version 5 so we can use curl on the
# json files directly
"version": 5,
}
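# Illustrative note (not from the original source): the dict returned by
# build_stack matches the version-5 Symbolication API payload shape, roughly
# (example values):
#
#     {
#         "stacks": [[(0, 11723), (-1, -1)]],
#         "memoryMap": [("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2")],
#         "version": 5,
#     }
#
# where each frame is (module index, module offset) and -1 marks an unknown
# module or offset.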
@click.group()
def make_stacks_group():
"""Generate stacks for symbolication from existing processed crash data."""
@make_stacks_group.command("print")
@click.option(
"--pretty/--no-pretty", default=False, help="Whether or not to print it pretty."
)
@click.argument("crashid", nargs=1)
@click.pass_context
def make_stacks_print(ctx, pretty, crashid):
"""Generate a stack from a processed crash and print it to stdout."""
crashid = crashid.strip()
crash_report = fetch_crash_report(crashid)
stack = build_stack(crash_report)
if pretty:
kwargs = {"indent": 2}
else:
kwargs = {}
print(json.dumps(stack, **kwargs))
@make_stacks_group.command("save")
@click.argument("outputdir")
@click.argument("crashids", nargs=-1)
@click.pass_context
def make_stacks_save(ctx, outputdir, crashids):
"""Generate stacks from processed crashes and save to file-system."""
# Handle crash ids from stdin or command line
if not crashids and not sys.stdin.isatty():
crashids = list(click.get_text_stream("stdin").readlines())
if not crashids:
raise click.BadParameter(
"No crashids provided.", ctx=ctx, param="crashids", param_hint="crashids"
)
if not os.path.exists(outputdir):
raise click.BadParameter(
"Outputdir does not exist.",
ctx=ctx,
param="outputdir",
param_hint="outputdir",
)
click.echo(f"Creating stacks and saving them to {outputdir!r} ...")
for crashid in crashids:
crashid = crashid.strip()
if crashid.startswith("#"):
continue
print(f"{crashid} ...")
crash_report = fetch_crash_report(crashid)
try:
data = build_stack(crash_report)
except Exception as exc:
click.echo(f"Exception thrown: {exc!r}")
data = None
if not data or not data["stacks"][0]:
click.echo("Nothing to save.")
continue
with open(os.path.join(outputdir, "%s.json" % crashid), "w") as fp:
json.dump(data, fp, indent=2)
click.echo("Done!")
if __name__ == "__main__":
make_stacks_group()
|
mpl-2.0
|
7654f25d6a753dd42548c0b41d787ce6
| 28.290909
| 85
| 0.622388
| 3.73493
| false
| false
| false
| false
|
mozilla-services/tecken
|
tecken/upload/forms.py
|
1
|
4406
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import os
from urllib.parse import urlparse
from requests.exceptions import ConnectionError, RetryError
from django import forms
from django.conf import settings
from tecken.librequests import session_with_retries
class UploadByDownloadRemoteError(Exception):
"""Happens when the upload-by-download URL is failing in a "transient" way.
For example, if the URL (when GET'ing) causes a ConnectionError or if it works
but returns a >=500 error. In those cases, we want to make sure the client
is informed "more strongly" than just getting a "400 Bad Request".
    As a note:
See https://dxr.mozilla.org/mozilla-central/rev/423bdf7a802b0d302244492b423609187de39f56/toolkit/crashreporter/tools/upload_symbols.py#116 # noqa
The Taskcluster symbol uploader knows to retry on any 5xx error. That's
meant to reflect 5xx in Tecken. But by carrying the 5xx from the
upload-by-download URL, we're doing them a favor.
"""
class UploadByDownloadForm(forms.Form):
url = forms.URLField()
def clean_url(self):
url = self.cleaned_data["url"]
# The URL has to be https:// to start with
parsed = urlparse(url)
if not settings.ALLOW_UPLOAD_BY_ANY_DOMAIN:
if parsed.scheme != "https":
raise forms.ValidationError("Insecure URL")
self._check_url_domain(url)
return url
@staticmethod
def _check_url_domain(url):
netloc_wo_port = urlparse(url).netloc.split(":")[0]
if not settings.ALLOW_UPLOAD_BY_ANY_DOMAIN:
if netloc_wo_port not in settings.ALLOW_UPLOAD_BY_DOWNLOAD_DOMAINS:
raise forms.ValidationError(
f"Not an allowed domain ({netloc_wo_port!r}) " "to download from."
)
def clean(self):
cleaned_data = super().clean()
if "url" in cleaned_data:
# In the main view code where the download actually happens,
# it'll follow any redirects automatically, but we want to
# do "recursive HEADs" to find out the size of the file.
# It also gives us an opportunity to record the redirect trail.
url = cleaned_data["url"]
parsed = urlparse(url)
response, redirect_urls = self.get_final_response(url)
content_length = response.headers["content-length"]
cleaned_data["upload"] = {
"name": os.path.basename(parsed.path),
"size": int(content_length),
"redirect_urls": redirect_urls,
}
return cleaned_data
@staticmethod
def get_final_response(initial_url, max_redirects=5):
"""return the final response when it 200 OK'ed and a list of URLs
that we had to go through redirects of."""
redirect_urls = [] # the mutable "store"
def get_response(url):
try:
response = session_with_retries().head(url)
status_code = response.status_code
except ConnectionError:
raise UploadByDownloadRemoteError(
f"ConnectionError trying to open {url}"
)
except RetryError:
raise UploadByDownloadRemoteError(f"RetryError trying to open {url}")
if status_code >= 500:
raise UploadByDownloadRemoteError(f"{url} errored ({status_code})")
if status_code >= 400:
raise forms.ValidationError(f"{url} can't be found ({status_code})")
if status_code >= 300 and status_code < 400:
redirect_url = response.headers["location"]
redirect_urls.append(redirect_url)
# Only do this if we haven't done it "too much" yet.
if len(redirect_urls) > max_redirects:
raise forms.ValidationError(
f"Too many redirects trying to open {initial_url}"
)
return get_response(redirect_url)
assert status_code >= 200 and status_code < 300, status_code
return response
final_response = get_response(initial_url)
return final_response, redirect_urls
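# Illustrative note (not from the original source): the view code that uses this
# form is expected to catch UploadByDownloadRemoteError and answer with a 5xx so
# the uploader retries, roughly:
#
#     try:
#         if form.is_valid():
#             ...
#     except UploadByDownloadRemoteError as exc:  # hypothetical handler sketch
#         return HttpResponse(str(exc), status=503)
#
# while ordinary validation problems stay as forms.ValidationError (a 400).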
|
mpl-2.0
|
10814f19975a341d7890b21ff6695121
| 41.365385
| 149
| 0.618021
| 4.273521
| false
| false
| false
| false
|
pimutils/todoman
|
tests/test_ui.py
|
1
|
4822
|
from datetime import datetime
from unittest import mock
import pytest
import pytz
from freezegun import freeze_time
from urwid import ExitMainLoop
from todoman.interactive import TodoEditor
def test_todo_editor_priority(default_database, todo_factory, default_formatter):
todo = todo_factory(priority=1)
lists = list(default_database.lists())
editor = TodoEditor(todo, lists, default_formatter)
assert editor._priority.label == "high"
editor._priority.keypress(10, "right")
with pytest.raises(ExitMainLoop): # Look at editor._msg_text if this fails
editor._keypress("ctrl s")
assert todo.priority == 0
def test_todo_editor_list(default_database, todo_factory, default_formatter, tmpdir):
tmpdir.mkdir("another_list")
default_database.paths = [
str(tmpdir.join("default")),
str(tmpdir.join("another_list")),
]
default_database.update_cache()
todo = todo_factory()
lists = list(default_database.lists())
editor = TodoEditor(todo, lists, default_formatter)
default_list = next(filter(lambda x: x.label == "default", editor.list_selector))
another_list = next(
filter(lambda x: x.label == "another_list", editor.list_selector)
)
assert editor.current_list == todo.list
assert default_list.label == todo.list.name
another_list.set_state(True)
editor._save_inner()
assert editor.current_list == todo.list
assert another_list.label == todo.list.name
def test_todo_editor_summary(default_database, todo_factory, default_formatter):
todo = todo_factory()
lists = list(default_database.lists())
editor = TodoEditor(todo, lists, default_formatter)
assert editor._summary.edit_text == "YARR!"
editor._summary.edit_text = "Goodbye"
with pytest.raises(ExitMainLoop): # Look at editor._msg_text if this fails
editor._keypress("ctrl s")
assert todo.summary == "Goodbye"
@freeze_time("2017-03-04 14:00:00", tz_offset=4)
def test_todo_editor_due(default_database, todo_factory, default_formatter):
tz = pytz.timezone("CET")
todo = todo_factory(due=datetime(2017, 3, 4, 14))
lists = list(default_database.lists())
default_formatter.tz = tz
editor = TodoEditor(todo, lists, default_formatter)
assert editor._due.edit_text == "2017-03-04 14:00"
editor._due.edit_text = "2017-03-10 12:00"
with pytest.raises(ExitMainLoop): # Look at editor._msg_text if this fails
editor._keypress("ctrl s")
assert todo.due == datetime(2017, 3, 10, 12, tzinfo=tz)
def test_toggle_help(default_database, default_formatter, todo_factory):
todo = todo_factory()
lists = list(default_database.lists())
editor = TodoEditor(todo, lists, default_formatter)
editor._loop = mock.MagicMock()
assert editor._help_text not in editor.left_column.body.contents
editor._keypress("f1")
# Help text is made visible
assert editor._help_text in editor.left_column.body.contents
# Called event_loop.draw_screen
assert editor._loop.draw_screen.call_count == 1
assert editor._loop.draw_screen.call_args == mock.call()
editor._keypress("f1")
    # Help text is hidden again
assert editor._help_text not in editor.left_column.body.contents
# Called event_loop.draw_screen
assert editor._loop.draw_screen.call_count == 2
assert editor._loop.draw_screen.call_args == mock.call()
def test_show_save_errors(default_database, default_formatter, todo_factory):
todo = todo_factory()
lists = list(default_database.lists())
editor = TodoEditor(todo, lists, default_formatter)
# editor._loop = mock.MagicMock()
editor._due.set_edit_text("not a date")
editor._keypress("ctrl s")
assert (
editor.left_column.body.contents[2].get_text()[0]
== "Time description not recognized: not a date"
)
@pytest.mark.parametrize("completed", [True, False])
@pytest.mark.parametrize("check", [True, False])
def test_save_completed(check, completed, default_formatter, todo_factory):
todo = todo_factory()
if completed:
todo.complete()
editor = TodoEditor(todo, [todo.list], default_formatter)
editor._completed.state = check
with pytest.raises(ExitMainLoop):
editor._keypress("ctrl s")
assert todo.is_completed is check
def test_ctrl_c_clears(default_formatter, todo_factory):
todo = todo_factory()
editor = TodoEditor(todo, [todo.list], default_formatter)
# Simulate that ctrl+c gets pressed, since we can't *really* do that
# trivially inside unit tests.
with mock.patch(
"urwid.main_loop.MainLoop.run", side_effect=KeyboardInterrupt
), mock.patch(
"urwid.main_loop.MainLoop.stop",
) as mocked_stop:
editor.edit()
assert mocked_stop.call_count == 1
|
isc
|
b3d6b385deacf8417a04794015c156d6
| 30.311688
| 85
| 0.684985
| 3.601195
| false
| true
| false
| false
|
pimutils/todoman
|
docs/source/conf.py
|
1
|
2282
|
#!/usr/bin/env python3
import todoman
from todoman.configuration import CONFIG_SPEC
from todoman.configuration import NO_DEFAULT
# -- Generate confspec.rst ----------------------------------------------
def confspec_rst():
"""Generator that returns lines for the confspec doc page."""
for name, type_, default, description, _validation in sorted(CONFIG_SPEC):
if default == NO_DEFAULT:
formatted_default = "None, this field is mandatory."
elif isinstance(default, str):
formatted_default = f'``"{default}"``'
else:
formatted_default = f"``{default}``"
yield f"\n.. _main-{name}:"
yield f"\n\n.. object:: {name}\n"
yield " " + "\n ".join(line for line in description.splitlines())
yield "\n\n"
if isinstance(type_, tuple):
yield f" :type: {type_[0].__name__}"
else:
yield f" :type: {type_.__name__}"
yield f"\n :default: {formatted_default}\n"
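# Illustrative note (not from the original source): for a hypothetical spec entry
# ("path", str, NO_DEFAULT, "Directory to scan.", None) the generator above yields
# text that renders roughly as
#
#     .. _main-path:
#
#     .. object:: path
#
#         Directory to scan.
#
#         :type: str
#         :default: None, this field is mandatory.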
with open("confspec.tmp", "w") as file_:
file_.writelines(confspec_rst())
# -- General configuration ------------------------------------------------
extensions = [
"sphinx_click.ext",
"sphinx.ext.autodoc",
"sphinx_autorun",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_rtd_theme",
]
source_suffix = ".rst"
master_doc = "index"
project = "Todoman"
copyright = "2015-2020, Hugo Osvaldo Barrera"
author = "Hugo Osvaldo Barrera <hugo@barrera.io>, et al"
# The short X.Y version.
version = todoman.__version__
# The full version, including alpha/beta/rc tags.
release = todoman.__version__
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
html_theme = "sphinx_rtd_theme"
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"man",
"todo",
"a simple, standards-based, cli todo manager",
[author],
1,
)
]
|
isc
|
1513b180ca2b449d39537a5e91fcbeb0
| 26.493976
| 79
| 0.56617
| 3.716612
| false
| true
| false
| false
|
mozilla-services/autopush
|
autopush/base.py
|
1
|
3544
|
import sys
import uuid
from typing import TYPE_CHECKING
import cyclone.web
from twisted.logger import Logger
from twisted.python import failure
if TYPE_CHECKING: # pragma: nocover
from autopush.config import AutopushConfig # noqa
from autopush.db import DatabaseManager # noqa
from autopush.metrics import IMetrics # noqa
class BaseHandler(cyclone.web.RequestHandler):
"""Base cyclone RequestHandler for autopush"""
log = Logger()
def initialize(self):
"""Initialize info from the client"""
self._client_info = self._init_info()
@property
def conf(self):
# type: () -> AutopushConfig
return self.application.conf
@property
def db(self):
# type: () -> DatabaseManager
return self.application.db
@property
def metrics(self):
# type: () -> IMetrics
return self.db.metrics
def _init_info(self):
return dict(
ami_id=self.conf.ami_id,
request_id=str(uuid.uuid4()),
user_agent=self.request.headers.get('user-agent', ""),
remote_ip=self.request.headers.get('x-forwarded-for',
self.request.remote_ip),
authorization=self.request.headers.get('authorization', ""),
message_ttl=self.request.headers.get('ttl', None),
uri=self.request.uri,
python_version=sys.version,
)
def write_error(self, code, **kwargs):
"""Write the error (otherwise unhandled exception when dealing with
unknown method specifications.)
This is a Cyclone API Override method used by endpoint and
websocket.
"""
try:
self.set_status(code)
if 'exc_info' in kwargs:
self.log.failure(
format=kwargs.get('format', "Exception"),
failure=failure.Failure(*kwargs['exc_info']),
client_info=self._client_info)
else:
self.log.error("Error in handler: %s" % code,
client_info=self._client_info)
self.finish()
except Exception as ex:
self.log.failure(
"error in write_error: {}:{} while printing {};{}".format(
code, ex, kwargs, self._client_info))
def authenticate_peer_cert(self):
"""Authenticate the client per the configured client_certs.
Aborts the request w/ a 401 on failure.
"""
cert = self.request.connection.transport.getPeerCertificate()
if cert:
cert_signature = cert.digest('sha256')
cn = cert.get_subject().CN
auth = self.conf.client_certs.get(cert_signature)
if auth is not None:
# TLS authenticated
self._client_info.update(tls_auth=auth,
tls_auth_sha256=cert_signature,
tls_auth_cn=cn)
return
self._client_info.update(tls_failed_sha256=cert_signature,
tls_failed_cn=cn)
self.log.warn("Failed TLS auth", client_info=self._client_info)
self.set_status(401)
# "Transport mode" isn't standard, inspired by:
# http://www6.ietf.org/mail-archive/web/tls/current/msg05589.html
self.set_header('WWW-Authenticate',
'Transport mode="tls-client-certificate"')
self.finish()
|
mpl-2.0
|
8cad22eeda4983dbe788ef4a5f2aa0ae
| 33.407767
| 75
| 0.561512
| 4.332518
| false
| false
| false
| false
|
mozilla-services/autopush
|
autopush/router/webpush.py
|
1
|
10390
|
"""WebPush Style Autopush Router
This router handles notifications that should be dispatched to an Autopush
node, or stores each individual message, along with its data, in a Message
table for retrieval by the client.
"""
import json
import time
from StringIO import StringIO
from typing import Any # noqa
from botocore.exceptions import ClientError
from twisted.internet.threads import deferToThread
from twisted.web.client import FileBodyProducer
from twisted.internet.defer import (
inlineCallbacks,
returnValue,
CancelledError,
)
from twisted.internet.error import (
ConnectError,
ConnectionClosed,
ConnectionRefusedError,
)
from twisted.logger import Logger
from twisted.web._newclient import ResponseFailed
from twisted.web.http import PotentialDataLoss
from autopush.exceptions import ItemNotFound, RouterException
from autopush.metrics import make_tags
from autopush.protocol import IgnoreBody
from autopush.router.interface import RouterResponse
from autopush.types import JSONDict # noqa
TTL_URL = "https://webpush-wg.github.io/webpush-protocol/#rfc.section.6.2"
class WebPushRouter(object):
"""Implements :class: `autopush.router.interface.IRouter` for internal
routing to an autopush node
"""
log = Logger()
def __init__(self, conf, router_conf, db, agent):
"""Create a new Router"""
self.conf = conf
self.router_conf = router_conf
self.db = db
self.agent = agent
@property
def metrics(self):
return self.db.metrics
def register(self, uaid, router_data, app_id, *args, **kwargs):
# type: (str, JSONDict, str, *Any, **Any) -> None
"""No additional routing data"""
def amend_endpoint_response(self, response, router_data):
# type: (JSONDict, JSONDict) -> None
"""Stubbed out for this router"""
@inlineCallbacks
def route_notification(self, notification, uaid_data):
"""Route a notification to an internal node, and store it if the node
can't deliver immediately or is no longer a valid node
"""
# Determine if they're connected at the moment
node_id = uaid_data.get("node_id")
uaid = uaid_data["uaid"]
router = self.db.router
# Node_id is present, attempt delivery.
# - Send Notification to node
# - Success: Done, return 200
# - Error (Node busy): Jump to Save notification below
# - Error (Client gone, node gone/dead): Clear node entry for user
# - Both: Done, return 503
if node_id:
result = None
try:
result = yield self._send_notification(uaid, node_id,
notification)
except (ConnectError, ConnectionClosed, ResponseFailed,
CancelledError, PotentialDataLoss) as exc:
self.metrics.increment("updates.client.host_gone")
yield deferToThread(router.clear_node,
uaid_data).addErrback(self._eat_db_err)
if isinstance(exc, ConnectionRefusedError):
# Occurs if an IP record is now used by some other node
                    # in AWS or if the connection times out.
self.log.debug("Could not route message: {exc}", exc=exc)
if result and result.code == 200:
returnValue(self.delivered_response(notification))
# Save notification, node is not present or busy
# - Save notification
# - Success (older version): Done, return 202
# - Error (db error): Done, return 503
try:
yield self._save_notification(uaid_data, notification)
except ClientError as e:
log_exception = (e.response["Error"]["Code"] !=
"ProvisionedThroughputExceededException")
raise RouterException("Error saving to database",
status_code=503,
response_body="Retry Request",
log_exception=log_exception,
errno=201)
# - Lookup client again to get latest node state after save.
# - Success (node found): Notify node of new notification
# - Success: Done, return 200
# - Error (no client): Done, return 202
# - Error (no node): Clear node entry
# - Both: Done, return 202
# - Success (no node): Done, return 202
# - Error (db error): Done, return 202
# - Error (no client) : Done, return 404
try:
uaid_data = yield deferToThread(router.get_uaid, uaid)
except ClientError:
returnValue(self.stored_response(notification))
except ItemNotFound:
self.metrics.increment("updates.client.deleted")
raise RouterException("User was deleted",
status_code=410,
response_body="Invalid UAID",
log_exception=False,
errno=105)
# Verify there's a node_id in here, if not we're done
node_id = uaid_data.get("node_id")
if not node_id:
returnValue(self.stored_response(notification))
try:
result = yield self._send_notification_check(uaid, node_id)
except (ConnectError, ConnectionClosed, ResponseFailed) as exc:
self.metrics.increment("updates.client.host_gone")
if isinstance(exc, ConnectionRefusedError):
self.log.debug("Could not route message: {exc}", exc=exc)
yield deferToThread(
router.clear_node,
uaid_data).addErrback(self._eat_db_err)
returnValue(self.stored_response(notification))
if result.code == 200:
returnValue(self.delivered_response(notification))
else:
ret_val = self.stored_response(notification)
returnValue(ret_val)
def delivered_response(self, notification):
self.metrics.increment("notification.message_data",
notification.data_length,
tags=make_tags(destination='Direct'))
location = "%s/m/%s" % (self.conf.endpoint_url, notification.location)
return RouterResponse(status_code=201, response_body="",
headers={"Location": location,
"TTL": notification.ttl or 0},
logged_status=200)
def stored_response(self, notification):
self.metrics.increment("notification.message_data",
notification.data_length,
tags=make_tags(destination='Stored'))
location = "%s/m/%s" % (self.conf.endpoint_url, notification.location)
# RFC https://tools.ietf.org/html/rfc8030#section-5
        # all responses should be 201, unless this is a push receipt request,
# which requires a 202 and a URL that can be checked later for UA
# acknowledgement. (We don't support that yet. See autopush-rs#244)
return RouterResponse(status_code=201, response_body="",
headers={"Location": location,
"TTL": notification.ttl},
logged_status=201)
#############################################################
# Blocking Helper Functions
#############################################################
def _send_notification(self, uaid, node_id, notification):
"""Send a notification to a specific node_id
This version of the overriden method includes the necessary crypto
headers for the notification.
:type notification: autopush.utils.WebPushNotification
"""
payload = notification.serialize()
payload["timestamp"] = int(time.time())
url = node_id + "/push/" + uaid
request = self.agent.request(
"PUT",
url.encode("utf8"),
bodyProducer=FileBodyProducer(StringIO(json.dumps(payload))),
)
request.addCallback(IgnoreBody.ignore)
return request
def _send_notification_check(self, uaid, node_id):
"""Send a command to the node to check for notifications"""
url = node_id + "/notif/" + uaid
return self.agent.request(
"PUT",
url.encode("utf8"),
).addCallback(IgnoreBody.ignore)
def _save_notification(self, uaid_data, notification):
"""Saves a notification, returns a deferred.
This version of the overridden method saves each individual message
to the message table along with relevant request headers if
available.
:type uaid_data: dict
"""
month_table = uaid_data["current_month"]
if notification.ttl is None:
# Note that this URL is temporary, as well as this warning as
# we will 400 all missing TTL's eventually
raise RouterException(
"Missing TTL Header",
response_body="Missing TTL Header, see: %s" % TTL_URL,
status_code=400,
errno=111,
log_exception=False,
)
if notification.ttl == 0:
location = "%s/m/%s" % (self.conf.endpoint_url,
notification.version)
raise RouterException("Finished Routing", status_code=201,
log_exception=False,
headers={"TTL": str(notification.ttl),
"Location": location},
logged_status=204)
return deferToThread(
self.db.message_table(month_table).store_message,
notification=notification,
)
#############################################################
# Error Callbacks
#############################################################
def _eat_db_err(self, fail):
"""errBack for ignoring provisioned throughput errors"""
fail.trap(ClientError)
|
mpl-2.0
|
99a381a862716c0e8ccf4a6825fb6726
| 40.56
| 78
| 0.5641
| 4.694984
| false
| false
| false
| false
|
mozilla-services/autopush
|
autopush/web/message.py
|
1
|
1772
|
from cryptography.fernet import InvalidToken
from marshmallow import Schema, fields, pre_load
from twisted.internet.threads import deferToThread
from twisted.internet.defer import Deferred # noqa
from autopush.exceptions import InvalidRequest, InvalidTokenException
from autopush.utils import WebPushNotification
from autopush.web.base import threaded_validate, BaseWebHandler
class MessageSchema(Schema):
notification = fields.Raw()
@pre_load
def extract_data(self, req):
message_id = req['path_kwargs'].get('message_id')
try:
notif = WebPushNotification.from_message_id(
bytes(message_id),
fernet=self.context['conf'].fernet,
)
except (InvalidToken, InvalidTokenException):
raise InvalidRequest("Invalid message ID",
status_code=400)
return dict(notification=notif)
class MessageHandler(BaseWebHandler):
cors_methods = "DELETE"
cors_response_headers = ("location",)
@threaded_validate(MessageSchema)
def delete(self, notification):
# type: (WebPushNotification) -> Deferred
"""Drops a pending message.
The message will only be removed from DynamoDB. Messages that were
successfully routed to a client as direct updates, but not delivered
yet, will not be dropped.
"""
d = deferToThread(self.db.message.delete_message, notification)
d.addCallback(self._delete_completed)
self._db_error_handling(d)
return d
def _delete_completed(self, *args, **kwargs):
self.log.debug(format="Message Deleted", status_code=204,
**self._client_info)
self.set_status(204)
self.finish()
|
mpl-2.0
|
ff7d2a1d5dd106efa40c2ec5dddb8a24
| 33.076923
| 76
| 0.659707
| 4.300971
| false
| false
| false
| false
|
mozilla-services/autopush
|
autopush/gcdump.py
|
1
|
3447
|
#! /usr/bin/env python
"""
Prints a human-readable total out of a dumpfile produced
by gc.dump_rpy_heap(), and optionally a typeids.txt.
Syntax: dump.py <dumpfile> [<typeids.txt>]
By default, typeids.txt is loaded from the same dir as dumpfile.
"""
import array
import os
import struct
import sys
class Stat(object):
summary = {}
typeids = {0: '<GCROOT>'}
def summarize(self, filename, stream=None):
a = self.load_dump_file(filename)
self.summary = {} # {typenum: [count, totalsize]}
for obj in self.walk(a, stream=stream):
self.add_object_summary(obj[2], obj[3])
def load_typeids(self, filename_or_iter):
self.typeids = Stat.typeids.copy()
if isinstance(filename_or_iter, str):
iter = open(filename_or_iter)
else:
iter = filename_or_iter
for num, line in enumerate(iter):
if num == 0:
continue
if not line:
continue
words = line.split()
if words[0].startswith('member'):
del words[0]
if words[0] == 'GcStruct':
del words[0]
self.typeids[num] = ' '.join(words)
def get_type_name(self, num):
return self.typeids.get(num, '<typenum %d>' % num)
def print_summary(self, stream):
items = self.summary.items()
items.sort(key=lambda (typenum, stat): stat[1]) # sort by totalsize
totalsize = 0
for typenum, stat in items:
totalsize += stat[1]
stream.write('%8d %8.2fM %s\n' %
(stat[0],
stat[1] / (1024.0*1024.0),
self.get_type_name(typenum)))
stream.write('total %.1fM\n' % (totalsize / (1024.0*1024.0)))
def load_dump_file(self, filename):
f = open(filename, 'rb')
f.seek(0, 2)
end = f.tell()
f.seek(0)
a = array.array('l')
a.fromfile(f, end / struct.calcsize('l'))
f.close()
return a
def add_object_summary(self, typenum, sizeobj):
try:
stat = self.summary[typenum]
except KeyError:
stat = self.summary[typenum] = [0, 0]
stat[0] += 1
stat[1] += sizeobj
def walk(self, a, start=0, stop=None, stream=None):
assert a[-1] == -1, "invalid or truncated dump file (or 32/64-bit mix)"
assert a[-2] != -1, "invalid or truncated dump file (or 32/64-bit mix)"
if stream:
stream.write('walking...')
i = start
if stop is None:
stop = len(a)
while i < stop:
j = i + 3
while a[j] != -1:
j += 1
yield (i, a[i], a[i+1], a[i+2], a[i+3:j])
i = j + 1
if stream:
stream.write('done\n')
if __name__ == '__main__':
if len(sys.argv) <= 1:
print >> sys.stderr, __doc__
sys.exit(2)
stat = Stat()
stat.summarize(sys.argv[1], stream=sys.stderr)
#
if len(sys.argv) > 2:
typeid_name = sys.argv[2]
else:
typeid_name = os.path.join(os.path.dirname(sys.argv[1]), 'typeids.txt')
if os.path.isfile(typeid_name):
stat.load_typeids(typeid_name)
else:
import gc
import zlib
stat.load_typeids(zlib.decompress(gc.get_typeids_z()).split("\n"))
#
stat.print_summary(sys.stdout)
|
mpl-2.0
|
57eda39c31cd4e5c01a6b981eadce2db
| 29.504425
| 79
| 0.519002
| 3.366211
| false
| false
| false
| false
|
dbr/tvnamer
|
tvnamer/_titlecase.py
|
1
|
3442
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# fmt: off
"""
Original Perl version by: John Gruber http://daringfireball.net/ 10 May 2008
Python version by Stuart Colville http://muffinresearch.co.uk
License: http://www.opensource.org/licenses/mit-license.php
"""
import re
__all__ = ['titlecase']
__version__ = '0.5.2'
SMALL = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\\.?|via|vs\\.?'
PUNCT = r"""!"#$%&'‘()*+,\-./:;?@[\\\]_`{|}~"""
SMALL_WORDS = re.compile(r'^(%s)$' % SMALL, re.I)
INLINE_PERIOD = re.compile(r'[a-z][.][a-z]', re.I)
UC_ELSEWHERE = re.compile(r'[%s]*?[a-zA-Z]+[A-Z]+?' % PUNCT)
CAPFIRST = re.compile(r"^[%s]*?([A-Za-z])" % PUNCT)
SMALL_FIRST = re.compile(r'^([%s]*)(%s)\b' % (PUNCT, SMALL), re.I)
SMALL_LAST = re.compile(r'\b(%s)[%s]?$' % (SMALL, PUNCT), re.I)
SUBPHRASE = re.compile(r'([:.;?!][ ])(%s)' % SMALL)
APOS_SECOND = re.compile(r"^[dol]{1}['‘]{1}[a-z]+$", re.I)
ALL_CAPS = re.compile(r'^[A-Z\s%s]+$' % PUNCT)
UC_INITIALS = re.compile(r"^(?:[A-Z]{1}\.{1}|[A-Z]{1}\.{1}[A-Z]{1})+$")
MAC_MC = re.compile(r"^([Mm]a?c)(\w+)")
def titlecase(text):
"""
Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'.
"""
lines = re.split('[\r\n]+', text)
processed = []
for line in lines:
all_caps = ALL_CAPS.match(line)
words = re.split('[\t ]', line)
tc_line = []
for word in words:
if all_caps:
if UC_INITIALS.match(word):
tc_line.append(word)
continue
else:
word = word.lower()
if APOS_SECOND.match(word):
word = word.replace(word[0], word[0].upper())
word = word.replace(word[2], word[2].upper())
tc_line.append(word)
continue
if INLINE_PERIOD.search(word) or UC_ELSEWHERE.match(word):
tc_line.append(word)
continue
if SMALL_WORDS.match(word):
tc_line.append(word.lower())
continue
match = MAC_MC.match(word)
if match:
tc_line.append("%s%s" % (match.group(1).capitalize(),
match.group(2).capitalize()))
continue
if "/" in word and "//" not in word:
slashed = []
for item in word.split('/'):
slashed.append(CAPFIRST.sub(lambda m: m.group(0).upper(), item))
tc_line.append("/".join(slashed))
continue
hyphenated = []
for item in word.split('-'):
hyphenated.append(CAPFIRST.sub(lambda m: m.group(0).upper(), item))
tc_line.append("-".join(hyphenated))
result = " ".join(tc_line)
result = SMALL_FIRST.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), result)
result = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), result)
result = SUBPHRASE.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), result)
processed.append(result)
return "\n".join(processed)
|
unlicense
|
48f2a80686652aef5f6396e1a69b8a91
| 31.742857
| 84
| 0.509889
| 3.111312
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactcore/factories/fsrs.py
|
1
|
10488
|
from datetime import date, datetime, timezone
import factory
from factory import fuzzy
from dataactcore.models import fsrs
class _FSRSAttributes(factory.Factory):
duns = fuzzy.FuzzyText()
uei_number = fuzzy.FuzzyText()
dba_name = fuzzy.FuzzyText()
principle_place_city = fuzzy.FuzzyText()
principle_place_street = None
principle_place_state = fuzzy.FuzzyText()
principle_place_state_name = fuzzy.FuzzyText()
principle_place_country = fuzzy.FuzzyText()
principle_place_zip = fuzzy.FuzzyText()
principle_place_district = None
parent_duns = fuzzy.FuzzyText()
funding_agency_id = fuzzy.FuzzyText()
funding_agency_name = fuzzy.FuzzyText()
top_paid_fullname_1 = None
top_paid_amount_1 = None
top_paid_fullname_2 = None
top_paid_amount_2 = None
top_paid_fullname_3 = None
top_paid_amount_3 = None
top_paid_fullname_4 = None
top_paid_amount_4 = None
top_paid_fullname_5 = None
top_paid_amount_5 = None
class _ContractAttributes(_FSRSAttributes):
company_name = fuzzy.FuzzyText()
bus_types = fuzzy.FuzzyText()
company_address_city = fuzzy.FuzzyText()
company_address_street = None
company_address_state = fuzzy.FuzzyText()
company_address_state_name = fuzzy.FuzzyText()
company_address_country = fuzzy.FuzzyText()
company_address_zip = fuzzy.FuzzyText()
company_address_district = None
parent_company_name = fuzzy.FuzzyText()
naics = fuzzy.FuzzyText()
funding_office_id = fuzzy.FuzzyText()
funding_office_name = fuzzy.FuzzyText()
recovery_model_q1 = fuzzy.FuzzyChoice((False, True))
recovery_model_q2 = fuzzy.FuzzyChoice((False, True))
class _GrantAttributes(_FSRSAttributes):
dunsplus4 = None
awardee_name = fuzzy.FuzzyText()
awardee_address_city = fuzzy.FuzzyText()
awardee_address_street = None
awardee_address_state = fuzzy.FuzzyText()
awardee_address_state_name = fuzzy.FuzzyText()
awardee_address_country = fuzzy.FuzzyText()
awardee_address_zip = fuzzy.FuzzyText()
awardee_address_district = None
cfda_numbers = fuzzy.FuzzyText()
project_description = fuzzy.FuzzyText()
compensation_q1 = fuzzy.FuzzyChoice((False, True))
compensation_q2 = fuzzy.FuzzyChoice((False, True))
federal_agency_id = fuzzy.FuzzyText()
federal_agency_name = fuzzy.FuzzyText()
class _PrimeAwardAttributes(factory.Factory):
internal_id = fuzzy.FuzzyText()
date_submitted = fuzzy.FuzzyDateTime(datetime(2010, 1, 1, tzinfo=timezone.utc))
report_period_mon = fuzzy.FuzzyText()
report_period_year = fuzzy.FuzzyText()
class FSRSProcurementFactory(_ContractAttributes, _PrimeAwardAttributes):
class Meta:
model = fsrs.FSRSProcurement
contract_number = fuzzy.FuzzyText()
idv_reference_number = None
report_type = fuzzy.FuzzyText()
contract_agency_code = fuzzy.FuzzyText()
contract_idv_agency_code = None
contracting_office_aid = fuzzy.FuzzyText()
contracting_office_aname = fuzzy.FuzzyText()
contracting_office_id = fuzzy.FuzzyText()
contracting_office_name = fuzzy.FuzzyText()
treasury_symbol = fuzzy.FuzzyText()
dollar_obligated = fuzzy.FuzzyText()
date_signed = fuzzy.FuzzyDate(date(2010, 1, 1))
transaction_type = fuzzy.FuzzyText()
program_title = fuzzy.FuzzyText()
subawards = []
class FSRSSubcontractFactory(_ContractAttributes):
class Meta:
model = fsrs.FSRSSubcontract
subcontract_amount = fuzzy.FuzzyText()
subcontract_date = fuzzy.FuzzyDate(date(2010, 1, 1))
subcontract_num = fuzzy.FuzzyText()
overall_description = fuzzy.FuzzyText()
recovery_subcontract_amt = None
class FSRSGrantFactory(_GrantAttributes, _PrimeAwardAttributes):
class Meta:
model = fsrs.FSRSGrant
fain = fuzzy.FuzzyText()
total_fed_funding_amount = fuzzy.FuzzyText()
obligation_date = fuzzy.FuzzyDate(date(2010, 1, 1))
class FSRSSubgrantFactory(_GrantAttributes):
class Meta:
model = fsrs.FSRSSubgrant
subaward_amount = fuzzy.FuzzyText()
subaward_date = fuzzy.FuzzyDate(date(2010, 1, 1))
subaward_num = fuzzy.FuzzyText()
class SubawardFactory(factory.Factory):
class Meta:
model = fsrs.Subaward
unique_award_key = fuzzy.FuzzyText()
award_id = fuzzy.FuzzyText()
parent_award_id = fuzzy.FuzzyText()
award_amount = fuzzy.FuzzyText()
action_date = fuzzy.FuzzyDate(date(2010, 1, 1))
fy = fuzzy.FuzzyText()
awarding_agency_code = fuzzy.FuzzyText()
awarding_agency_name = fuzzy.FuzzyText()
awarding_sub_tier_agency_c = fuzzy.FuzzyText()
awarding_sub_tier_agency_n = fuzzy.FuzzyText()
awarding_office_code = fuzzy.FuzzyText()
awarding_office_name = fuzzy.FuzzyText()
funding_agency_code = fuzzy.FuzzyText()
funding_agency_name = fuzzy.FuzzyText()
funding_sub_tier_agency_co = fuzzy.FuzzyText()
funding_sub_tier_agency_na = fuzzy.FuzzyText()
funding_office_code = fuzzy.FuzzyText()
funding_office_name = fuzzy.FuzzyText()
awardee_or_recipient_uei = fuzzy.FuzzyText()
awardee_or_recipient_uniqu = fuzzy.FuzzyText()
awardee_or_recipient_legal = fuzzy.FuzzyText()
dba_name = fuzzy.FuzzyText()
ultimate_parent_uei = fuzzy.FuzzyText()
ultimate_parent_unique_ide = fuzzy.FuzzyText()
ultimate_parent_legal_enti = fuzzy.FuzzyText()
legal_entity_country_code = fuzzy.FuzzyText()
legal_entity_country_name = fuzzy.FuzzyText()
legal_entity_address_line1 = fuzzy.FuzzyText()
legal_entity_city_name = fuzzy.FuzzyText()
legal_entity_state_code = fuzzy.FuzzyText()
legal_entity_state_name = fuzzy.FuzzyText()
legal_entity_zip = fuzzy.FuzzyText()
legal_entity_congressional = fuzzy.FuzzyText()
legal_entity_foreign_posta = fuzzy.FuzzyText()
business_types = fuzzy.FuzzyText()
place_of_perform_city_name = fuzzy.FuzzyText()
place_of_perform_state_code = fuzzy.FuzzyText()
place_of_perform_state_name = fuzzy.FuzzyText()
place_of_performance_zip = fuzzy.FuzzyText()
place_of_perform_congressio = fuzzy.FuzzyText()
place_of_perform_country_co = fuzzy.FuzzyText()
place_of_perform_country_na = fuzzy.FuzzyText()
award_description = fuzzy.FuzzyText()
naics = fuzzy.FuzzyText()
naics_description = fuzzy.FuzzyText()
cfda_numbers = fuzzy.FuzzyText()
cfda_titles = fuzzy.FuzzyText()
subaward_type = fuzzy.FuzzyText()
subaward_report_year = fuzzy.FuzzyText()
subaward_report_month = fuzzy.FuzzyText()
subaward_number = fuzzy.FuzzyText()
subaward_amount = fuzzy.FuzzyText()
sub_action_date = fuzzy.FuzzyDate(date(2010, 1, 1))
sub_awardee_or_recipient_uei = fuzzy.FuzzyText()
sub_awardee_or_recipient_uniqu = fuzzy.FuzzyText()
sub_awardee_or_recipient_legal = fuzzy.FuzzyText()
sub_dba_name = fuzzy.FuzzyText()
sub_ultimate_parent_uei = fuzzy.FuzzyText()
sub_ultimate_parent_unique_ide = fuzzy.FuzzyText()
sub_ultimate_parent_legal_enti = fuzzy.FuzzyText()
sub_legal_entity_country_code = fuzzy.FuzzyText()
sub_legal_entity_country_name = fuzzy.FuzzyText()
sub_legal_entity_address_line1 = fuzzy.FuzzyText()
sub_legal_entity_city_name = fuzzy.FuzzyText()
sub_legal_entity_state_code = fuzzy.FuzzyText()
sub_legal_entity_state_name = fuzzy.FuzzyText()
sub_legal_entity_zip = fuzzy.FuzzyText()
sub_legal_entity_congressional = fuzzy.FuzzyText()
sub_legal_entity_foreign_posta = fuzzy.FuzzyText()
sub_business_types = fuzzy.FuzzyText()
sub_place_of_perform_city_name = fuzzy.FuzzyText()
sub_place_of_perform_state_code = fuzzy.FuzzyText()
sub_place_of_perform_state_name = fuzzy.FuzzyText()
sub_place_of_performance_zip = fuzzy.FuzzyText()
sub_place_of_perform_congressio = fuzzy.FuzzyText()
sub_place_of_perform_country_co = fuzzy.FuzzyText()
sub_place_of_perform_country_na = fuzzy.FuzzyText()
subaward_description = fuzzy.FuzzyText()
sub_high_comp_officer1_full_na = fuzzy.FuzzyText()
sub_high_comp_officer1_amount = fuzzy.FuzzyText()
sub_high_comp_officer2_full_na = fuzzy.FuzzyText()
sub_high_comp_officer2_amount = fuzzy.FuzzyText()
sub_high_comp_officer3_full_na = fuzzy.FuzzyText()
sub_high_comp_officer3_amount = fuzzy.FuzzyText()
sub_high_comp_officer4_full_na = fuzzy.FuzzyText()
sub_high_comp_officer4_amount = fuzzy.FuzzyText()
sub_high_comp_officer5_full_na = fuzzy.FuzzyText()
sub_high_comp_officer5_amount = fuzzy.FuzzyText()
prime_id = fuzzy.FuzzyInteger(0, 100)
internal_id = fuzzy.FuzzyText()
date_submitted = fuzzy.FuzzyDateTime(datetime(2010, 1, 1, tzinfo=timezone.utc))
report_type = fuzzy.FuzzyText()
transaction_type = fuzzy.FuzzyText()
program_title = fuzzy.FuzzyText()
contract_agency_code = fuzzy.FuzzyText()
contract_idv_agency_code = fuzzy.FuzzyText()
grant_funding_agency_id = fuzzy.FuzzyText()
grant_funding_agency_name = fuzzy.FuzzyText()
federal_agency_name = fuzzy.FuzzyText()
treasury_symbol = fuzzy.FuzzyText()
dunsplus4 = fuzzy.FuzzyText()
recovery_model_q1 = fuzzy.FuzzyText()
recovery_model_q2 = fuzzy.FuzzyText()
compensation_q1 = fuzzy.FuzzyText()
compensation_q2 = fuzzy.FuzzyText()
high_comp_officer1_full_na = fuzzy.FuzzyText()
high_comp_officer1_amount = fuzzy.FuzzyText()
high_comp_officer2_full_na = fuzzy.FuzzyText()
high_comp_officer2_amount = fuzzy.FuzzyText()
high_comp_officer3_full_na = fuzzy.FuzzyText()
high_comp_officer3_amount = fuzzy.FuzzyText()
high_comp_officer4_full_na = fuzzy.FuzzyText()
high_comp_officer4_amount = fuzzy.FuzzyText()
high_comp_officer5_full_na = fuzzy.FuzzyText()
high_comp_officer5_amount = fuzzy.FuzzyText()
sub_id = fuzzy.FuzzyInteger(0, 100)
sub_parent_id = fuzzy.FuzzyInteger(0, 100)
sub_federal_agency_id = fuzzy.FuzzyText()
sub_federal_agency_name = fuzzy.FuzzyText()
sub_funding_agency_id = fuzzy.FuzzyText()
sub_funding_agency_name = fuzzy.FuzzyText()
sub_funding_office_id = fuzzy.FuzzyText()
sub_funding_office_name = fuzzy.FuzzyText()
sub_naics = fuzzy.FuzzyText()
sub_cfda_numbers = fuzzy.FuzzyText()
sub_dunsplus4 = fuzzy.FuzzyText()
sub_recovery_subcontract_amt = fuzzy.FuzzyText()
sub_recovery_model_q1 = fuzzy.FuzzyText()
sub_recovery_model_q2 = fuzzy.FuzzyText()
sub_compensation_q1 = fuzzy.FuzzyText()
sub_compensation_q2 = fuzzy.FuzzyText()
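# Usage sketch (hypothetical, assuming standard factory_boy semantics; not part of the original module):
#   procurement = FSRSProcurementFactory()            # builds an fsrs.FSRSProcurement with fuzzed fields
#   grant = FSRSGrantFactory(internal_id='ABC-123')   # keyword overrides replace the fuzzy defaults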
|
cc0-1.0
|
12768cdcbf34026c215e0a3106c79ac4
| 38.577358
| 83
| 0.705664
| 3.02684
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/4d8408c33fee_add_frec_to_user_model.py
|
1
|
2519
|
"""add FREC to user model
Revision ID: 4d8408c33fee
Revises: da2e50d423ff
Create Date: 2017-07-06 13:19:01.155328
"""
# revision identifiers, used by Alembic.
revision = '4d8408c33fee'
down_revision = 'da2e50d423ff'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user_affiliation', sa.Column('frec_id', sa.Integer(), nullable=True))
op.add_column('user_affiliation', sa.Column('user_affiliation_id', sa.Integer(), nullable=False, primary_key=True))
op.create_index(op.f('ix_user_affiliation_cgac_id'), 'user_affiliation', ['cgac_id'], unique=False)
op.create_index(op.f('ix_user_affiliation_frec_id'), 'user_affiliation', ['frec_id'], unique=False)
op.create_index(op.f('ix_user_affiliation_user_id'), 'user_affiliation', ['user_id'], unique=False)
op.create_foreign_key('user_affiliation_frec_fk', 'user_affiliation', 'frec', ['frec_id'], ['frec_id'], ondelete='CASCADE')
op.drop_constraint('user_affiliation_pkey', 'user_affiliation', type_='primary')
op.create_primary_key('user_affiliation_pkey', 'user_affiliation', ['user_affiliation_id'])
op.alter_column('user_affiliation', 'cgac_id',
existing_type=sa.INTEGER(),
nullable=True)
### end Alembic commands ###
def downgrade_data_broker():
op.execute("DELETE FROM user_affiliation "
"WHERE cgac_id IS NULL")
### commands auto generated by Alembic - please adjust! ###
op.alter_column('user_affiliation', 'cgac_id',
existing_type=sa.INTEGER(),
nullable=False)
op.drop_constraint('user_affiliation_pkey', 'user_affiliation', type_='primary')
op.create_primary_key('user_affiliation_pkey', 'user_affiliation', ['user_id', 'cgac_id'])
op.drop_constraint('user_affiliation_frec_fk', 'user_affiliation', type_='foreignkey')
op.drop_index(op.f('ix_user_affiliation_user_id'), table_name='user_affiliation')
op.drop_index(op.f('ix_user_affiliation_frec_id'), table_name='user_affiliation')
op.drop_index(op.f('ix_user_affiliation_cgac_id'), table_name='user_affiliation')
op.drop_column('user_affiliation', 'user_affiliation_id')
op.drop_column('user_affiliation', 'frec_id')
### end Alembic commands ###
|
cc0-1.0
|
9aa740485204676183f732de3572657b
| 39.629032
| 127
| 0.675665
| 3.160602
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/812387580a0b_rename_user_permissions_column.py
|
1
|
1298
|
"""rename user permissions column
Revision ID: 812387580a0b
Revises: a97dabbd44f4
Create Date: 2016-11-09 11:40:11.657516
"""
# revision identifiers, used by Alembic.
revision = '812387580a0b'
down_revision = 'a97dabbd44f4'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.execute('TRUNCATE permission_type')
op.add_column('users', sa.Column('permission_type_id', sa.Integer(), nullable=True))
op.create_foreign_key('user_permission_type_fk', 'users', 'permission_type', ['permission_type_id'], ['permission_type_id'])
op.drop_column('users', 'permissions')
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('permissions', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint('user_permission_type_fk', 'users', type_='foreignkey')
op.drop_column('users', 'permission_type_id')
op.execute('TRUNCATE permission_type')
### end Alembic commands ###
|
cc0-1.0
|
8879c51c91151e7e298ce544fabc1ca0
| 27.844444
| 128
| 0.693374
| 3.433862
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/11338a6b7e77_adding_business_categories_derivation_.py
|
1
|
4287
|
"""Adding business categories derivation function
Revision ID: 11338a6b7e77
Revises: e26d14b0d235
Create Date: 2021-12-09 09:42:10.715687
"""
# revision identifiers, used by Alembic.
revision = '11338a6b7e77'
down_revision = 'e26d14b0d235'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("""
create or replace function compile_fabs_business_categories(business_types text)
returns text[]
immutable parallel safe
as $$
declare
bc_arr text[];
begin
-- BUSINESS (FOR-PROFIT ORGANIZATION)
if business_types ~ '(R|23)'
then
bc_arr := bc_arr || array['small_business'];
end if;
if business_types ~ '(Q|22)'
then
bc_arr := bc_arr || array['other_than_small_business'];
end if;
if bc_arr && array['small_business', 'other_than_small_business']
then
bc_arr := bc_arr || array['category_business'];
end if;
-- NON-PROFIT
if business_types ~ '(M|N|12)'
then
bc_arr := bc_arr || array['nonprofit'];
end if;
-- HIGHER EDUCATION
if business_types ~ '(H|06)'
then
bc_arr := bc_arr || array['public_institution_of_higher_education'];
end if;
if business_types ~ '(O|20)'
then
bc_arr := bc_arr || array['private_institution_of_higher_education'];
end if;
if business_types ~ '(T|U|V|S)'
then
bc_arr := bc_arr || array['minority_serving_institution_of_higher_education'];
end if;
if bc_arr && array[
'public_institution_of_higher_education',
'private_institution_of_higher_education',
'minority_serving_institution_of_higher_education'
]
then
bc_arr := bc_arr || array['higher_education'];
end if;
-- GOVERNMENT
if business_types ~ '(A|00)'
then
bc_arr := bc_arr || array['regional_and_state_government'];
end if;
if business_types ~ '(E)'
then
bc_arr := bc_arr || array['regional_organization'];
end if;
if business_types ~ '(F)'
then
bc_arr := bc_arr || array['us_territory_or_possession'];
end if;
if business_types ~ '(B|C|D|G|01|02|04|05)'
then
bc_arr := bc_arr || array['local_government'];
end if;
if business_types ~ '(I|J|K|11)'
then
bc_arr := bc_arr || array['indian_native_american_tribal_government'];
end if;
if business_types ~ '(L)'
then
bc_arr := bc_arr || array['authorities_and_commissions'];
end if;
if bc_arr && array[
'regional_and_state_government',
'us_territory_or_possession',
'local_government',
'indian_native_american_tribal_government',
'authorities_and_commissions',
'regional_organization'
]
then
bc_arr := bc_arr || array['government'];
end if;
-- INDIVIDUALS
if business_types ~ '(P|21)'
then
bc_arr := bc_arr || array['individuals'];
end if;
-- Sort and return the array.
return array(select unnest(bc_arr) order by 1);
end;
$$ language plpgsql;
""")
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.execute(""" DROP FUNCTION IF EXISTS compile_fabs_business_categories(TEXT) """)
# ### end Alembic commands ###
|
cc0-1.0
|
22ac24043a80e0b4a9940305dd47d36d
| 27.771812
| 94
| 0.504549
| 3.915068
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/d45dde2ba15b_alter_detached_regular_award_procurement.py
|
1
|
4837
|
"""Alter and add many columns in DetachedAwardProcurement and AwardProcurement
Revision ID: d45dde2ba15b
Revises: 001758a1ab82
Create Date: 2018-03-09 14:08:13.058669
"""
# revision identifiers, used by Alembic.
revision = 'd45dde2ba15b'
down_revision = '001758a1ab82'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.execute("ALTER TABLE award_procurement RENAME COLUMN walsh_healey_act TO materials_supplies_article")
op.execute("ALTER TABLE award_procurement RENAME COLUMN service_contract_act TO labor_standards")
op.execute("ALTER TABLE award_procurement RENAME COLUMN davis_bacon_act TO construction_wage_rate_req")
op.execute("ALTER TABLE award_procurement RENAME COLUMN government_furnished_equip TO government_furnished_prope")
op.add_column('award_procurement', sa.Column('cage_code', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('inherently_government_func', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('organizational_type', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('number_of_employees', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('annual_revenue', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('total_obligated_amount', sa.Text(), nullable=True))
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN walsh_healey_act TO materials_supplies_article")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN walsh_healey_act_descrip TO materials_supplies_descrip")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN service_contract_act TO labor_standards")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN service_contract_act_desc TO labor_standards_descrip")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN davis_bacon_act TO construction_wage_rate_req")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN davis_bacon_act_descrip TO construction_wage_rat_desc")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN government_furnished_equip TO government_furnished_prope")
op.add_column('detached_award_procurement', sa.Column('cage_code', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('inherently_government_func', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('organizational_type', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.execute("ALTER TABLE award_procurement RENAME COLUMN materials_supplies_article TO walsh_healey_act")
op.execute("ALTER TABLE award_procurement RENAME COLUMN labor_standards TO service_contract_act")
op.execute("ALTER TABLE award_procurement RENAME COLUMN construction_wage_rate_req TO davis_bacon_act")
op.execute("ALTER TABLE award_procurement RENAME COLUMN government_furnished_prope TO government_furnished_equip")
op.drop_column('award_procurement', 'cage_code')
op.drop_column('award_procurement', 'inherently_government_func')
op.drop_column('award_procurement', 'organizational_type')
op.drop_column('award_procurement', 'number_of_employees')
op.drop_column('award_procurement', 'annual_revenue')
op.drop_column('award_procurement', 'total_obligated_amount')
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN materials_supplies_article TO walsh_healey_act")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN materials_supplies_descrip TO walsh_healey_act_descrip")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN labor_standards TO service_contract_act")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN labor_standards_descrip TO service_contract_act_desc")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN construction_wage_rate_req TO davis_bacon_act")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN construction_wage_rat_desc TO davis_bacon_act_descrip")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN government_furnished_prope TO government_furnished_equip")
op.drop_column('detached_award_procurement', 'cage_code')
op.drop_column('detached_award_procurement', 'inherently_government_func')
op.drop_column('detached_award_procurement', 'organizational_type')
### end Alembic commands ###
|
cc0-1.0
|
d291a0d60ad6f598712ad00e1e45eaff
| 59.4625
| 127
| 0.761422
| 3.408739
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/df2f541291a5_create_gtas_submission_window_table.py
|
1
|
1146
|
"""Create gtas_submission_window table
Revision ID: df2f541291a5
Revises: 427320dea2ab
Create Date: 2017-07-06 12:06:53.946926
"""
# revision identifiers, used by Alembic.
revision = 'df2f541291a5'
down_revision = '427320dea2ab'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('gtas_submission_window',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('window_id', sa.Integer(), nullable=False),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('end_date', sa.Date(), nullable=True),
sa.PrimaryKeyConstraint('window_id')
)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('gtas_submission_window')
### end Alembic commands ###
|
cc0-1.0
|
23f64eca8a0b4769d555c34497fe52bf
| 23.382979
| 63
| 0.680628
| 3.360704
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/4d66a8d6e11b_create_filerequest_table_for_d_file_.py
|
1
|
2841
|
"""create FileRequest table for D file generation
Revision ID: 4d66a8d6e11b
Revises: bcdf1134f0df
Create Date: 2017-10-19 14:28:03.788883
"""
# revision identifiers, used by Alembic.
revision = '4d66a8d6e11b'
down_revision = 'bcdf1134f0df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('file_request',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('file_request_id', sa.Integer(), nullable=False),
sa.Column('request_date', sa.Date(), nullable=False),
sa.Column('job_id', sa.Integer(), nullable=False),
sa.Column('parent_job_id', sa.Integer(), nullable=True),
sa.Column('start_date', sa.Date(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=False),
sa.Column('agency_code', sa.Text(), nullable=False),
sa.Column('file_type', sa.Text(), nullable=False),
sa.Column('is_cached_file', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['job_id'], ['job.job_id'], name='fk_file_request_job_id'),
sa.PrimaryKeyConstraint('file_request_id')
)
op.create_index(op.f('ix_file_request_agency_code'), 'file_request', ['agency_code'], unique=False)
op.create_index(op.f('ix_file_request_end_date'), 'file_request', ['end_date'], unique=False)
op.create_index(op.f('ix_file_request_file_type'), 'file_request', ['file_type'], unique=False)
op.create_index(op.f('ix_file_request_parent_job_id'), 'file_request', ['parent_job_id'], unique=False)
op.create_index(op.f('ix_file_request_request_date'), 'file_request', ['request_date'], unique=False)
op.create_index(op.f('ix_file_request_start_date'), 'file_request', ['start_date'], unique=False)
op.add_column('job', sa.Column('from_cached', sa.Boolean(), server_default='False', nullable=False))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('job', 'from_cached')
op.drop_index(op.f('ix_file_request_start_date'), table_name='file_request')
op.drop_index(op.f('ix_file_request_request_date'), table_name='file_request')
op.drop_index(op.f('ix_file_request_parent_job_id'), table_name='file_request')
op.drop_index(op.f('ix_file_request_file_type'), table_name='file_request')
op.drop_index(op.f('ix_file_request_end_date'), table_name='file_request')
op.drop_index(op.f('ix_file_request_agency_code'), table_name='file_request')
op.drop_table('file_request')
### end Alembic commands ###
|
cc0-1.0
|
a9064a67d1ac4888888d51de82b9b3f6
| 40.779412
| 107
| 0.675818
| 3.088043
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/7597deb348fb_fabs_created_at_and_fpds_updated_at_.py
|
1
|
1251
|
"""FABS created_at and FPDS updated_at indexes
Revision ID: 7597deb348fb
Revises: b168f0cdc5a8
Create Date: 2018-02-06 16:08:20.985202
"""
# revision identifiers, used by Alembic.
revision = '7597deb348fb'
down_revision = 'b168f0cdc5a8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_detached_award_procurement_updated_at'), 'detached_award_procurement', ['updated_at'], unique=False)
op.create_index(op.f('ix_published_award_financial_assistance_created_at'), 'published_award_financial_assistance', ['created_at'], unique=False)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_detached_award_procurement_updated_at'), table_name='detached_award_procurement')
op.drop_index(op.f('ix_published_award_financial_assistance_created_at'), table_name='published_award_financial_assistance')
### end Alembic commands ###
|
cc0-1.0
|
7ce4bf3c93c7df64e5f5b9b4c48d6cdf
| 28.785714
| 149
| 0.717026
| 3.191327
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/scripts/load_duns_exec_comp.py
|
1
|
14861
|
import argparse
import datetime
import logging
import os
import re
import json
import tempfile
import boto3
import requests
from dataactcore.config import CONFIG_BROKER
from dataactcore.interfaces.db import GlobalDB
from dataactcore.interfaces.function_bag import update_external_data_load_date
from dataactcore.broker_logging import configure_logging
from dataactcore.models.domainModels import SAMRecipient
from dataactcore.utils.sam_recipient import (parse_sam_recipient_file, update_sam_recipient, parse_exec_comp_file,
update_missing_parent_names, request_sam_csv_api,
is_nonexistent_file_error)
from dataactvalidator.health_check import create_app
logger = logging.getLogger(__name__)
SAM_FILE_FORMAT = 'SAM_{data_type}_UTF-8_{period}{version}_%Y%m%d.ZIP'
DATA_TYPES = {
'DUNS': 'FOUO',
'Executive Compensation': 'EXECCOMP'
}
PERIODS = ['MONTHLY', 'DAILY']
VERSIONS = {
'v1': '', # V1 files simply exclude the version
'v2': '_V2'
}
S3_ARCHIVE = CONFIG_BROKER['sam']['duns']['csv_archive_bucket']
S3_ARCHIVE_PATH = '{data_type}/{version}/{file_name}'
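# For illustration (comment added for clarity, not in the original script): for DUNS daily v2 files the
# pattern above resolves to 'SAM_FOUO_UTF-8_DAILY_V2_%Y%m%d.ZIP', e.g. 'SAM_FOUO_UTF-8_DAILY_V2_20210101.ZIP'.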
def load_from_sam(data_type, sess, historic, local=None, metrics=None, reload_date=None):
""" Process the script arguments to figure out which files to process in which order
Args:
data_type: data type to load (DUNS or executive compensation)
sess: the database connection
historic: whether to load in monthly file and daily files after, or just the latest daily files
local: path to local directory to process, if None, it will go though the remote SAM service
metrics: dictionary representing metrics data for the load
reload_date: specific date to force reload from
"""
if not metrics:
metrics = {}
# Figure out what files we have available based on our local or remote setup
if local:
local_files = os.listdir(local)
monthly_v1_files = sorted([monthly_file for monthly_file in local_files
if re.match('SAM_{}_UTF-8_MONTHLY_\d+\.ZIP'.format(DATA_TYPES[data_type]),
monthly_file.upper())])
monthly_v2_files = sorted([monthly_file for monthly_file in local_files
if re.match('SAM_{}_UTF-8_MONTHLY_V2_\d+\.ZIP'.format(DATA_TYPES[data_type]),
monthly_file.upper())])
daily_v1_files = sorted([daily_file for daily_file in local_files
if re.match('SAM_{}_UTF-8_DAILY_\d+\.ZIP'.format(DATA_TYPES[data_type]),
daily_file.upper())])
daily_v2_files = sorted([daily_file for daily_file in local_files
if re.match('SAM_{}_UTF-8_DAILY_V2_\d+\.ZIP'.format(DATA_TYPES[data_type]),
daily_file.upper())])
else:
        # TODO: the SAM API currently doesn't list available files and doesn't include historic ones,
        #       so we're pulling files from the CSV_ARCHIVE_BUCKET bucket and then using the API.
        #       Rework this if SAM adds these historic files to the API and lists which files are available.
monthly_v1_files = list_s3_archive_files(data_type, 'MONTHLY', 'v1')
monthly_v2_files = list_s3_archive_files(data_type, 'MONTHLY', 'v2')
daily_v1_files = list_s3_archive_files(data_type, 'DAILY', 'v1')
daily_v2_files = list_s3_archive_files(data_type, 'DAILY', 'v2')
# Extracting the dates from these to figure out which files to process where
# For both monthly and daily files, we only want to process v1 files until the equivalent v2 files are available
monthly_v1_dates = extract_dates_from_list(monthly_v1_files, data_type, 'MONTHLY', 'v1')
monthly_v2_dates = extract_dates_from_list(monthly_v2_files, data_type, 'MONTHLY', 'v2')
monthly_v1_dates = [monthly_v1_date for monthly_v1_date in monthly_v1_dates
if monthly_v1_date not in monthly_v2_dates]
if historic:
earliest_date = sorted(monthly_v1_dates + monthly_v2_dates)[0]
daily_v1_dates = extract_dates_from_list(daily_v1_files, data_type, 'DAILY', 'v1')
daily_v2_dates = extract_dates_from_list(daily_v2_files, data_type, 'DAILY', 'v2')
    daily_v1_dates = [daily_v1_date for daily_v1_date in daily_v1_dates
                      if daily_v1_date not in daily_v2_dates]
latest_date = sorted(daily_v1_dates + daily_v2_dates)[-1]
# For any dates after the latest date we have in the archive, use the API
daily_v2_api_dates = [latest_date + datetime.timedelta(days=i)
for i in range(1, (datetime.date.today() - latest_date).days + 1)]
# determine which daily files to load in by setting the start load date
if historic:
load_date = earliest_date
elif reload_date:
        # a bit redundant, but strptime() here also validates the supplied date format
load_date = datetime.datetime.strptime(reload_date, '%Y-%m-%d').date()
else:
sam_field = SAMRecipient.last_sam_mod_date if data_type == 'DUNS' else SAMRecipient.last_exec_comp_mod_date
load_date = sess.query(sam_field).filter(sam_field.isnot(None)).order_by(sam_field.desc()).first()
if not load_date:
field = 'sam' if data_type == 'DUNS' else 'executive compensation'
raise Exception('No last {} mod date found in DUNS table. Please run historic loader first.'.format(field))
load_date = load_date[0]
# only load in the daily files after the load date
daily_v1_dates = list(filter(lambda daily_date: daily_date >= load_date, daily_v1_dates))
daily_v2_dates = list(filter(lambda daily_date: daily_date >= load_date, daily_v2_dates))
daily_v2_api_dates = list(filter(lambda daily_date: daily_date >= load_date, daily_v2_api_dates))
if historic:
# load in the earliest monthly file and all daily files after
version = 'v1' if earliest_date in monthly_v1_dates else 'v2'
process_sam_file(data_type, 'MONTHLY', version, earliest_date, sess, local=local, metrics=metrics)
for daily_v1_date in daily_v1_dates:
process_sam_file(data_type, 'DAILY', 'v1', daily_v1_date, sess, local=local, metrics=metrics)
for daily_v2_date in daily_v2_dates:
process_sam_file(data_type, 'DAILY', 'v2', daily_v2_date, sess, local=local, metrics=metrics)
if not local:
for daily_api_v2_date in daily_v2_api_dates:
try:
process_sam_file(data_type, 'DAILY', 'v2', daily_api_v2_date, sess, local=local, api=True,
metrics=metrics)
except requests.exceptions.HTTPError as e:
if is_nonexistent_file_error(e):
logger.warning('No file found for {}, continuing'.format(daily_api_v2_date))
continue
else:
logger.exception(e.response.content.decode('utf-8'))
raise e
if data_type == 'DUNS':
updated_date = datetime.date.today()
metrics['parent_rows_updated'] = update_missing_parent_names(sess, updated_date=updated_date)
metrics['parent_update_date'] = str(updated_date)
if historic:
logger.info('Despite the historical load being done, the UEI will most likely be out of date. '
'Please manually update using the UEI crosswalk file and SQL.')
def extract_dates_from_list(sam_files, data_type, period, version):
""" Given a list of SAM files, extract the dates the files refer to
Args:
sam_files: list of sam file names to extract dates from
data_type: data type to load (DUNS or executive compensation)
period: monthly or daily
version: v1 or v2
Returns:
sorted list of dates corresponding to the files
"""
sam_filename_format = SAM_FILE_FORMAT.format(data_type=DATA_TYPES[data_type], period=period,
version=VERSIONS[version])
return sorted([datetime.datetime.strptime(sam_file, sam_filename_format).date() for sam_file in sam_files])
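# For example (illustrative comment, not in the original script):
#   extract_dates_from_list(['SAM_FOUO_UTF-8_MONTHLY_20170301.ZIP'], 'DUNS', 'MONTHLY', 'v1')
# returns [datetime.date(2017, 3, 1)].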
def list_s3_archive_files(data_type, period, version):
""" Given the requested fields, provide a list of available files from the remote S3 archive
Args:
data_type: data type to load (DUNS or executive compensation)
period: monthly or daily
version: v1 or v2
Returns:
list of available files in the S3 archive
"""
s3_resource = boto3.resource('s3', region_name='us-gov-west-1')
archive_bucket = s3_resource.Bucket(S3_ARCHIVE)
file_name = SAM_FILE_FORMAT[:30].format(data_type=DATA_TYPES[data_type], period=period)
prefix = S3_ARCHIVE_PATH.format(data_type=data_type, version=version, file_name=file_name)
return [os.path.basename(object.key) for object in archive_bucket.objects.filter(Prefix=prefix)]
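# For example (illustrative comment, not in the original script): for DUNS monthly v1 files the computed
# prefix is 'DUNS/v1/SAM_FOUO_UTF-8_MONTHLY', so only the archived monthly FOUO zips are listed.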
def download_sam_file(root_dir, file_name, api=False):
""" Downloads the requested DUNS file to root_dir
Args:
root_dir: the folder containing the DUNS file
file_name: the name of the SAM file
api: whether to use the SAM CSV API or not
Raises:
            requests.exceptions.HTTPError if the SAM HTTP API doesn't have the file requested
"""
logger.info('Pulling {} via {}'.format(file_name, 'API' if api else 'archive'))
if api:
request_sam_csv_api(root_dir, file_name)
else:
s3_client = boto3.client('s3', region_name='us-gov-west-1')
reverse_map = {v: k for k, v in DATA_TYPES.items()}
data_type = reverse_map[file_name.split('_')[1]]
version = 'v2' if 'V2' in file_name else 'v1'
key = S3_ARCHIVE_PATH.format(data_type=data_type, version=version, file_name=file_name)
s3_client.download_file(S3_ARCHIVE, key, os.path.join(root_dir, file_name))
def process_sam_file(data_type, period, version, date, sess, local=None, api=False, metrics=None):
""" Process the SAM file found locally or remotely
Args:
data_type: data type to load (DUNS or executive compensation)
period: monthly or daily
version: v1 or v2
sess: the database connection
local: path to local directory to process, if None, it will go though the remote SAM service
api: whether to use the SAM CSV API or not
metrics: dictionary representing metrics data for the load
Raises:
            requests.exceptions.HTTPError if the SAM HTTP API doesn't have the file requested
"""
if not metrics:
metrics = {}
root_dir = local if local else tempfile.gettempdir()
file_name_format = SAM_FILE_FORMAT.format(data_type=DATA_TYPES[data_type], period=period, version=VERSIONS[version])
file_name = date.strftime(file_name_format)
if not local:
download_sam_file(root_dir, file_name, api=api)
file_path = os.path.join(root_dir, file_name)
includes_uei = version == 'v2'
if data_type == 'DUNS':
add_update_data, delete_data = parse_sam_recipient_file(file_path, metrics=metrics)
if add_update_data is not None:
update_sam_recipient(sess, add_update_data, metrics=metrics, includes_uei=includes_uei)
if delete_data is not None:
update_sam_recipient(sess, delete_data, metrics=metrics, deletes=True, includes_uei=includes_uei)
else:
exec_comp_data = parse_exec_comp_file(file_path, metrics=metrics)
update_sam_recipient(sess, exec_comp_data, metrics=metrics, includes_uei=includes_uei)
if not local:
os.remove(file_path)
if __name__ == '__main__':
now = datetime.datetime.now()
configure_logging()
parser = argparse.ArgumentParser(description='Get data from SAM and update SAM Recipient/exec comp tables')
parser.add_argument("-t", "--data_type", choices=['duns', 'exec_comp', 'both'], default='both',
help='Select data type to load')
scope = parser.add_mutually_exclusive_group(required=True)
scope.add_argument("-a", "--historic", action="store_true", help='Reload from the first monthly file on')
scope.add_argument("-u", "--update", action="store_true", help='Load daily files since latest last_sam_mod_date')
environ = parser.add_mutually_exclusive_group(required=True)
environ.add_argument("-l", "--local", type=str, default=None, help='Local directory to work from')
environ.add_argument("-r", "--remote", action="store_true", help='Work from a remote directory (SAM)')
parser.add_argument("-f", "--reload_date", type=str, default=None, help='Force update from a specific date'
' (YYYY-MM-DD)')
args = parser.parse_args()
data_type = args.data_type
historic = args.historic
update = args.update
local = args.local
reload_date = args.reload_date
metrics = {
'script_name': 'load_duns_exec_comp.py',
'start_time': str(now),
'files_processed': [],
'records_received': 0,
'records_processed': 0,
'adds_received': 0,
'updates_received': 0,
'deletes_received': 0,
'added_uei': [],
'updated_uei': [],
'records_added': 0,
'records_updated': 0,
'parent_rows_updated': 0,
'parent_update_date': None
}
with create_app().app_context():
sess = GlobalDB.db().session
if data_type in ('duns', 'both'):
start_time = datetime.datetime.now()
load_from_sam('DUNS', sess, historic, local, metrics=metrics, reload_date=reload_date)
update_external_data_load_date(start_time, datetime.datetime.now(), 'recipient')
if data_type in ('exec_comp', 'both'):
start_time = datetime.datetime.now()
load_from_sam('Executive Compensation', sess, historic, local, metrics=metrics, reload_date=reload_date)
update_external_data_load_date(start_time, datetime.datetime.now(), 'executive_compensation')
sess.close()
metrics['records_added'] = len(set(metrics['added_uei']))
metrics['records_updated'] = len(set(metrics['updated_uei']) - set(metrics['added_uei']))
del metrics['added_uei']
del metrics['updated_uei']
logger.info('Added {} records and updated {} records'.format(metrics['records_added'], metrics['records_updated']))
metrics['duration'] = str(datetime.datetime.now() - now)
with open('load_duns_exec_comp_metrics.json', 'w+') as metrics_file:
json.dump(metrics, metrics_file)
|
cc0-1.0
|
be7e7644638804e04662161a59c1cfb7
| 48.046205
| 120
| 0.631317
| 3.602667
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_fabs33_1.py
|
1
|
1700
|
from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs33_1'
def test_column_headers(database):
expected_subset = {'row_number', 'period_of_performance_curr', 'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" PeriodOfPerformanceCurrentEndDate is an optional field, but when provided, must follow YYYYMMDD format """
fabs_1 = FABSFactory(period_of_performance_curr='19990131', correction_delete_indicatr='')
fabs_2 = FABSFactory(period_of_performance_curr=None, correction_delete_indicatr='c')
fabs_3 = FABSFactory(period_of_performance_curr='', correction_delete_indicatr=None)
# Ignore correction delete indicator of D
fabs_4 = FABSFactory(period_of_performance_curr='1234', correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4])
assert errors == 0
def test_failure(database):
""" PeriodOfPerformanceCurrentEndDate is an optional field, but when provided, must follow YYYYMMDD format """
fabs_1 = FABSFactory(period_of_performance_curr='19990132', correction_delete_indicatr='')
fabs_2 = FABSFactory(period_of_performance_curr='19991331', correction_delete_indicatr=None)
fabs_3 = FABSFactory(period_of_performance_curr='1234', correction_delete_indicatr='c')
fabs_4 = FABSFactory(period_of_performance_curr='200912', correction_delete_indicatr='C')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4])
assert errors == 4
|
cc0-1.0
|
353a34ed30d037cb3ab7dc150928bea1
| 50.515152
| 114
| 0.741765
| 3.359684
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/5664b0e3e179_add_agency_name_and_sub_tier_agency_.py
|
1
|
1469
|
"""Add agency name and sub-tier agency name to published award financial assistance
Revision ID: 5664b0e3e179
Revises: ce1087583081
Create Date: 2017-05-19 02:47:18.081619
"""
# revision identifiers, used by Alembic.
revision = '5664b0e3e179'
down_revision = 'ce1087583081'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
op.add_column('published_award_financial_assistance', sa.Column('awarding_agency_name', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('awarding_sub_tier_agency_n', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('funding_agency_name', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('funding_sub_tier_agency_na', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade_data_broker():
op.drop_column('published_award_financial_assistance', 'funding_sub_tier_agency_na')
op.drop_column('published_award_financial_assistance', 'funding_agency_name')
op.drop_column('published_award_financial_assistance', 'awarding_sub_tier_agency_n')
op.drop_column('published_award_financial_assistance', 'awarding_agency_name')
### end Alembic commands ###
|
cc0-1.0
|
77d42f0b5f93174280fa94f7fbc5d341
| 34.829268
| 124
| 0.730429
| 3.041408
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/ae35bd44ec9a_add_multicolumn_indexes_to_published_.py
|
1
|
1326
|
"""Add multicolumn indexes to published_award_financial_assistance
Revision ID: ae35bd44ec9a
Revises: 66ce64f4c1da
Create Date: 2018-09-19 09:15:27.852093
"""
# revision identifiers, used by Alembic.
revision = 'ae35bd44ec9a'
down_revision = '66ce64f4c1da'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_pafa_fain_awarding_sub_tier_is_active', 'published_award_financial_assistance', ['fain', 'awarding_sub_tier_agency_c', 'is_active'], unique=False)
op.create_index('ix_pafa_uri_awarding_sub_tier_is_active', 'published_award_financial_assistance', ['uri', 'awarding_sub_tier_agency_c', 'is_active'], unique=False)
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_pafa_uri_awarding_sub_tier_is_active', table_name='published_award_financial_assistance')
op.drop_index('ix_pafa_fain_awarding_sub_tier_is_active', table_name='published_award_financial_assistance')
# ### end Alembic commands ###
|
cc0-1.0
|
39bac2af114b0638f39d225e04724fe0
| 30.571429
| 170
| 0.711916
| 2.993228
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/6a7fa3623e2c_add_country_code_2_char_column_to_.py
|
1
|
1112
|
"""Add country_code_2_char column to country_code table
Revision ID: 6a7fa3623e2c
Revises: 4d64c79360af
Create Date: 2021-08-25 13:55:39.566227
"""
# revision identifiers, used by Alembic.
revision = '6a7fa3623e2c'
down_revision = '4d64c79360af'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('country_code', sa.Column('country_code_2_char', sa.Text(), nullable=True))
op.create_index(op.f('ix_country_code_country_code_2_char'), 'country_code', ['country_code_2_char'], unique=False)
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_country_code_country_code_2_char'), table_name='country_code')
op.drop_column('country_code', 'country_code_2_char')
# ### end Alembic commands ###
|
cc0-1.0
|
54ac27ab70892b1bff615ba2724bd9d9
| 25.47619
| 119
| 0.686151
| 3.106145
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/scripts/load_historical_published_flex_field.py
|
1
|
7805
|
import csv
import boto3
import datetime
import logging
import tempfile
import pandas as pd
from pandas import isnull
from sqlalchemy import func
from dataactcore.config import CONFIG_BROKER
from dataactcore.interfaces.db import GlobalDB
from dataactcore.broker_logging import configure_logging
from dataactcore.models.jobModels import PublishedFilesHistory, Job
from dataactcore.models.jobModels import Submission
from dataactcore.models.userModel import User # noqa
from dataactcore.models.lookups import PUBLISH_STATUS_DICT, FILE_TYPE_DICT, JOB_TYPE_DICT, FILE_TYPE_DICT_ID
from dataactcore.models.stagingModels import FlexField, PublishedFlexField
from dataactvalidator.health_check import create_app
from dataactvalidator.scripts.loader_utils import insert_dataframe
logger = logging.getLogger(__name__)
FILE_LIST = [FILE_TYPE_DICT['appropriations'], FILE_TYPE_DICT['program_activity'], FILE_TYPE_DICT['award_financial']]
def copy_published_submission_flex_fields():
""" Copy flex fields from the flex_field table to the published_flex_field table for published DABS submissions. """
logger.info('Moving published flex fields')
sess = GlobalDB.db().session
column_list = [col.key for col in FlexField.__table__.columns]
column_list.remove('created_at')
column_list.remove('updated_at')
column_list.remove('flex_field_id')
published_col_string = ', '.join(column_list)
col_string = ', '.join([col if not col == 'submission_id' else 'flex_field.' + col for col in column_list])
# Delete the old ones so we don't have conflicts
sess.execute(
"""DELETE FROM published_flex_field
USING submission
WHERE submission.submission_id = published_flex_field.submission_id
AND publish_status_id = {}
""".format(PUBLISH_STATUS_DICT['published']))
# Insert all flex fields from submissions in the published (not updated) status
sess.execute(
"""INSERT INTO published_flex_field (created_at, updated_at, {})
SELECT NOW() AS created_at, NOW() AS updated_at, {}
FROM flex_field
JOIN submission ON submission.submission_id = flex_field.submission_id
WHERE submission.publish_status_id = {}
AND submission.is_fabs IS FALSE
""".format(published_col_string, col_string, PUBLISH_STATUS_DICT['published']))
sess.commit()
logger.info('Moved published flex fields')
def clean_col(datum):
    """ Trim whitespace from a cell value, returning None for null or empty cells. """
    if isnull(datum) or not str(datum).strip():
        return None
    # Trim
    return str(datum).strip()
def process_flex_data(data, flex_headers, submission_id, job_id, file_type_id):
""" Process the file that contains flex fields and insert all flex cells into the published table
Args:
data: The pandas dataframe containing the file
flex_headers: The flex fields contained in this file
submission_id: The ID associated with the submission this file comes from
job_id: The ID associated with the job this file comes from
file_type_id: The ID of the file type that this is
"""
# Only use the flex columns
data = data.rename(columns=lambda x: x.lower().strip())
data = data[list(flex_headers)]
if len(data.index) > 0:
data = data.applymap(clean_col)
# Populate row number, adding 2 to the index because the first row is always row 2 but index starts at 0
data = data.reset_index()
data['row_number'] = data.index + 2
data = data.drop(['index'], axis=1)
# Split each flex field into its own row with both content and headers while keeping the row number
new_df = pd.melt(data, id_vars=['row_number'], value_vars=flex_headers, var_name='header', value_name='cell')
# Filling in all the shared data for these flex fields
now = datetime.datetime.now()
new_df['created_at'] = now
new_df['updated_at'] = now
new_df['job_id'] = job_id
new_df['submission_id'] = submission_id
new_df['file_type_id'] = file_type_id
return new_df
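# For example (illustrative comment, not in the original script): a source row at row_number 2 with
# columns flex_foo='A' and flex_bar='B' melts into two rows, (row_number=2, header='flex_foo', cell='A')
# and (row_number=2, header='flex_bar', cell='B'), each stamped with the shared job/submission/file_type ids.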
def load_updated_flex_fields():
""" Load in flex fields from updated submissions as they were at the latest publication """
logger.info('Moving updated flex fields')
sess = GlobalDB.db().session
# Get a list of all submissions with published flex fields
published_flex_subs = sess.query(PublishedFlexField.submission_id).distinct().all()
# We only want to go through updated submissions without flex fields already loaded
updated_subs = sess.query(Submission.submission_id).\
filter(~Submission.submission_id.in_(published_flex_subs),
Submission.is_fabs.is_(False),
Submission.publish_status_id == PUBLISH_STATUS_DICT['updated']).all()
published_ids = sess. \
query(func.max(PublishedFilesHistory.publish_history_id).label('max_pub_id')). \
filter(PublishedFilesHistory.submission_id.in_(updated_subs)). \
group_by(PublishedFilesHistory.submission_id).cte('published_ids')
historical_files = sess.query(PublishedFilesHistory.filename, PublishedFilesHistory.file_type_id,
PublishedFilesHistory.submission_id). \
join(published_ids, published_ids.c.max_pub_id == PublishedFilesHistory.publish_history_id).\
filter(PublishedFilesHistory.file_type_id.in_(FILE_LIST))
# Loop through each updated submission
for historical_file in historical_files:
filename = historical_file.filename
submission_id = historical_file.submission_id
file_type_id = historical_file.file_type_id
# If this is a file in S3, download to a local temp file first then use temp file as local file
if CONFIG_BROKER['use_aws']:
(file, tmp_filename) = tempfile.mkstemp()
s3 = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
s3.download_file(CONFIG_BROKER['certified_bucket'], filename, tmp_filename)
filename = tmp_filename
with open(filename) as file:
# Get file delimiter, get an array of the header row, and reset reader to start of file
header_line = file.readline()
delim = '|' if header_line.count('|') != 0 else ','
header_row = next(csv.reader([header_line], quotechar='"', dialect='excel', delimiter=delim))
file.seek(0)
flex_list = [header.lower() for header in header_row if header.lower().startswith('flex_')]
# If there are no flex fields, just ignore this file, no need to go through it
if len(flex_list) == 0:
continue
# Create dataframe from file
data = pd.read_csv(file, dtype=str, delimiter=delim)
logger.info('Moving flex fields for submission {}, {} file'.format(submission_id,
FILE_TYPE_DICT_ID[file_type_id]))
# Getting the job so we can get the ID
job = sess.query(Job).filter_by(submission_id=submission_id, file_type_id=file_type_id,
job_type_id=JOB_TYPE_DICT['csv_record_validation']).one()
# Process and insert the data
flex_data = process_flex_data(data, flex_list, submission_id, job.job_id, file_type_id)
insert_dataframe(flex_data, PublishedFlexField.__table__.name, sess.connection())
sess.commit()
logger.info('Moved updated flex fields')
def main():
""" Load flex fields for published submissions that haven't been loaded into the published flex fields table. """
copy_published_submission_flex_fields()
load_updated_flex_fields()
if __name__ == '__main__':
configure_logging()
with create_app().app_context():
main()
|
cc0-1.0
|
bd253be04b2b61294cb2501f6e2d190c
| 41.418478
| 120
| 0.665086
| 3.984176
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/d3c10a9f589a_create_states_table.py
|
1
|
1124
|
"""Create States table
Revision ID: d3c10a9f589a
Revises: ff7c3328a4b1
Create Date: 2017-07-20 13:25:58.071561
"""
# revision identifiers, used by Alembic.
revision = 'd3c10a9f589a'
down_revision = 'ff7c3328a4b1'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('states',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('states_id', sa.Integer(), nullable=False),
sa.Column('state_code', sa.Text(), nullable=True),
sa.Column('state_name', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('states_id')
)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('states')
### end Alembic commands ###
|
cc0-1.0
|
95eda7b8d8176ce5d35e7c5518c5b382
| 22.914894
| 63
| 0.658363
| 3.355224
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/models/domainModels.py
|
1
|
22858
|
from datetime import timedelta
import pandas as pd
import sqlalchemy as sa
from sqlalchemy import (Column, Date, DateTime, ForeignKey, Index, Integer, Numeric, Text, Float, UniqueConstraint,
Boolean, ARRAY)
from sqlalchemy.orm import relationship
from dataactcore.models.baseModel import Base
def concat_tas(context):
""" Given a database context, return a concatenated TAS string.
Arguments:
context: the context for the current model
Returns:
concatenated TAS string of the current model
"""
return concat_tas_dict(context.current_parameters)
def concat_display_tas(context):
""" Given a database context, return a concatenated display TAS string.
Arguments:
context: the context for the current model
Returns:
concatenated display TAS string of the current model
"""
return concat_display_tas_dict(context.current_parameters)
def concat_tas_dict(tas_dict):
""" Given a dictionary, create a concatenated TAS string.
Arguments:
tas_dict: dictionary representing the object with the TAS attributes
Returns:
concatenated TAS string of the current model
"""
tas1 = tas_dict['allocation_transfer_agency']
tas1 = tas1 if tas1 else '000'
tas2 = tas_dict['agency_identifier']
tas2 = tas2 if tas2 else '000'
tas3 = tas_dict['beginning_period_of_availa']
tas3 = tas3 if tas3 else '0000'
tas4 = tas_dict['ending_period_of_availabil']
tas4 = tas4 if tas4 else '0000'
tas5 = tas_dict['availability_type_code']
tas5 = tas5 if tas5 else ' '
tas6 = tas_dict['main_account_code']
tas6 = tas6 if tas6 else '0000'
tas7 = tas_dict['sub_account_code']
tas7 = tas7 if tas7 else '000'
tas = '{}{}{}{}{}{}{}'.format(tas1, tas2, tas3, tas4, tas5, tas6, tas7)
return tas
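# Hedged usage sketch (not part of the original module): shows the fixed-width string
# concat_tas_dict() builds when some components are missing. The dict below is invented
# for illustration; real callers pass model column values via current_parameters.
def _example_concat_tas():
    example = {'allocation_transfer_agency': None, 'agency_identifier': '097',
               'beginning_period_of_availa': '2019', 'ending_period_of_availabil': '2020',
               'availability_type_code': None, 'main_account_code': '0100',
               'sub_account_code': None}
    return concat_tas_dict(example)  # -> '00009720192020 0100000' (22 fixed-width characters)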
def concat_tas_dict_vectorized(frame: pd.DataFrame):
""" Given a DataFrame containing columns for all TAS components, build a Series of the TAS string.
Arguments:
frame: the DataFrame from whose columns to build the TAS display string
Returns:
A series containing TAS display strings
"""
tas_frame = frame[list(TAS_COMPONENTS)].copy()
tas_frame['allocation_transfer_agency'] = tas_frame['allocation_transfer_agency'].fillna('000')
tas_frame['agency_identifier'] = tas_frame['agency_identifier'].fillna('000')
tas_frame['beginning_period_of_availa'] = tas_frame['beginning_period_of_availa'].fillna('0000')
tas_frame['ending_period_of_availabil'] = tas_frame['ending_period_of_availabil'].fillna('0000')
tas_frame['availability_type_code'] = tas_frame['availability_type_code'].fillna(' ')
tas_frame['main_account_code'] = tas_frame['main_account_code'].fillna('0000')
tas_frame['sub_account_code'] = tas_frame['sub_account_code'].fillna('000')
return \
tas_frame['allocation_transfer_agency'] + \
tas_frame['agency_identifier'] + \
tas_frame['beginning_period_of_availa'] + \
tas_frame['ending_period_of_availabil'] + \
tas_frame['availability_type_code'] + \
tas_frame['main_account_code'] + \
tas_frame['sub_account_code']
def concat_display_tas_dict(tas_dict):
""" Given a dictionary, create a concatenated display TAS string. Copied directly from USASpending.gov.
Arguments:
tas_dict: dictionary representing the object with the TAS attributes
Returns:
concatenated display TAS string of the current model
"""
tas_rendering_label = "-".join(filter(None, (tas_dict['allocation_transfer_agency'],
tas_dict['agency_identifier'])))
typecode = tas_dict['availability_type_code']
if typecode:
tas_rendering_label = "-".join(filter(None, (tas_rendering_label, typecode)))
else:
poa = "/".join(filter(None, (tas_dict['beginning_period_of_availa'], tas_dict['ending_period_of_availabil'])))
tas_rendering_label = "-".join(filter(None, (tas_rendering_label, poa)))
tas_rendering_label = "-".join(filter(None, (tas_rendering_label, tas_dict['main_account_code'],
tas_dict['sub_account_code'])))
return tas_rendering_label
TAS_COMPONENTS = (
'allocation_transfer_agency', 'agency_identifier', 'beginning_period_of_availa', 'ending_period_of_availabil',
'availability_type_code', 'main_account_code', 'sub_account_code'
)
class TASLookup(Base):
""" An entry of CARS history -- this TAS was present in the CARS file between internal_start_date and
internal_end_date (potentially null)
"""
__tablename__ = "tas_lookup"
tas_id = Column(Integer, primary_key=True)
account_num = Column(Integer, index=True, nullable=False)
allocation_transfer_agency = Column(Text, nullable=True, index=True)
agency_identifier = Column(Text, nullable=True, index=True)
beginning_period_of_availa = Column(Text, nullable=True, index=True)
ending_period_of_availabil = Column(Text, nullable=True, index=True)
availability_type_code = Column(Text, nullable=True, index=True)
main_account_code = Column(Text, nullable=True, index=True)
sub_account_code = Column(Text, nullable=True, index=True)
internal_start_date = Column(Date, nullable=False)
internal_end_date = Column(Date, nullable=True)
financial_indicator2 = Column(Text, nullable=True)
fr_entity_description = Column(Text, nullable=True)
fr_entity_type = Column(Text, nullable=True)
account_title = Column(Text, nullable=True)
reporting_agency_aid = Column(Text, nullable=True)
reporting_agency_name = Column(Text, nullable=True)
budget_bureau_code = Column(Text, nullable=True)
budget_bureau_name = Column(Text, nullable=True)
budget_function_code = Column(Text, nullable=True)
budget_function_title = Column(Text, nullable=True)
budget_subfunction_code = Column(Text, nullable=True)
budget_subfunction_title = Column(Text, nullable=True)
tas = Column(Text, nullable=False, default=concat_tas, index=True)
display_tas = Column(Text, default=concat_display_tas)
def component_dict(self):
""" We'll often want to copy TAS component fields; this method returns a dictionary of field_name to value """
return {field_name: getattr(self, field_name) for field_name in TAS_COMPONENTS}
Index("ix_tas",
TASLookup.allocation_transfer_agency,
TASLookup.agency_identifier,
TASLookup.beginning_period_of_availa,
TASLookup.ending_period_of_availabil,
TASLookup.availability_type_code,
TASLookup.main_account_code,
TASLookup.sub_account_code,
TASLookup.internal_start_date,
TASLookup.internal_end_date)
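# Hedged sketch (not part of the original module): component_dict() pairs naturally with
# concat_tas_dict() when a TAS string needs to be rebuilt from a CARS history row.
def _example_tas_from_lookup(tas_lookup_row):
    # tas_lookup_row is assumed to be a TASLookup instance pulled from the database
    return concat_tas_dict(tas_lookup_row.component_dict())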
def is_not_distinct_from(left, right):
""" Postgres' IS NOT DISTINCT FROM is an equality check that accounts for NULLs. Unfortunately, it doesn't make
use of indexes. Instead, we'll imitate it here
"""
return sa.or_(left == right, sa.and_(left.is_(None), right.is_(None)))
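# Hedged usage sketch (not part of the original module): how the helper above can stand in
# for IS NOT DISTINCT FROM in a filter. Matching on a possibly-NULL component such as
# allocation_transfer_agency would otherwise miss rows where both sides are NULL.
def _example_null_safe_filter(sess, ata_value):
    # ata_value may be None; wrapping it in sa.literal() keeps both sides as SQL expressions,
    # which is what the left.is_(None)/right.is_(None) branch above expects.
    return sess.query(TASLookup).filter(
        is_not_distinct_from(TASLookup.allocation_transfer_agency, sa.literal(ata_value)))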
def matching_cars_subquery(sess, model_class, start_date, end_date, submission_id=None):
""" We frequently need to mass-update records to look up their CARS history entry. This function creates a subquery
to be used in that update call. We pass in the database session to avoid circular dependencies
"""
# Why min()?
# Our data schema doesn't prevent two TAS history entries with the same
# TAS components (ATA, AI, etc.) from being valid at the same time. When
# that happens (unlikely), we select the minimum (i.e. older) of the
# potential TAS history entries.
subquery = sess.query(sa.func.min(TASLookup.account_num))
# Filter to matching TAS components, accounting for NULLs
for field_name in TAS_COMPONENTS:
tas_col = getattr(TASLookup, field_name)
model_col = getattr(model_class, field_name)
subquery = subquery.filter(sa.func.coalesce(tas_col, '') == sa.func.coalesce(model_col, ''))
day_after_end = end_date + timedelta(days=1)
model_dates = sa.tuple_(start_date, end_date)
tas_dates = sa.tuple_(TASLookup.internal_start_date, sa.func.coalesce(TASLookup.internal_end_date, day_after_end))
subquery = subquery.filter(model_dates.op('OVERLAPS')(tas_dates))
if submission_id:
model_sub_id = getattr(model_class, 'submission_id')
subquery = subquery.filter(submission_id == model_sub_id)
return subquery.as_scalar()
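# Hedged usage sketch (not part of the original module): the typical correlated-update
# pattern this subquery supports. SF133 (defined later in this module) carries every TAS
# component column plus account_num, so it satisfies the model_class contract; the date
# arguments are assumed to bound the reporting window being linked.
def _example_link_sf133_to_cars(sess, window_start, window_end):
    sess.query(SF133).update(
        {SF133.account_num: matching_cars_subquery(sess, SF133, window_start, window_end)},
        synchronize_session=False)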
class CGAC(Base):
__tablename__ = "cgac"
cgac_id = Column(Integer, primary_key=True)
cgac_code = Column(Text, nullable=False, index=True, unique=True)
agency_name = Column(Text)
icon_name = Column(Text)
class FREC(Base):
__tablename__ = "frec"
frec_id = Column(Integer, primary_key=True)
frec_code = Column(Text, nullable=True, index=True, unique=True)
agency_name = Column(Text)
cgac_id = Column(Integer, ForeignKey("cgac.cgac_id", name='fk_frec_cgac', ondelete="CASCADE"), nullable=False)
cgac = relationship('CGAC', foreign_keys='FREC.cgac_id', cascade="delete")
icon_name = Column(Text)
class SubTierAgency(Base):
__tablename__ = "sub_tier_agency"
sub_tier_agency_id = Column(Integer, primary_key=True)
sub_tier_agency_code = Column(Text, nullable=False, index=True, unique=True)
sub_tier_agency_name = Column(Text)
cgac_id = Column(Integer, ForeignKey("cgac.cgac_id", name='fk_sub_tier_agency_cgac', ondelete="CASCADE"),
nullable=False)
cgac = relationship('CGAC', foreign_keys='SubTierAgency.cgac_id', cascade="delete")
priority = Column(Integer, nullable=False, default='2', server_default='2')
frec_id = Column(Integer, ForeignKey("frec.frec_id", name='fk_sub_tier_agency_frec', ondelete="CASCADE"),
nullable=True)
frec = relationship('FREC', foreign_keys='SubTierAgency.frec_id', cascade="delete")
is_frec = Column(Boolean, nullable=False, default=False, server_default="False")
class Office(Base):
__tablename__ = "office"
office_id = Column(Integer, primary_key=True)
office_code = Column(Text, nullable=False, index=True, unique=True)
office_name = Column(Text)
sub_tier_code = Column(Text, nullable=False, index=True)
agency_code = Column(Text, nullable=False, index=True)
contract_awards_office = Column(Boolean, nullable=False, default=False, server_default="False")
contract_funding_office = Column(Boolean, nullable=False, default=False, server_default="False")
financial_assistance_awards_office = Column(Boolean, nullable=False, default=False, server_default="False")
financial_assistance_funding_office = Column(Boolean, nullable=False, default=False, server_default="False")
class ObjectClass(Base):
__tablename__ = "object_class"
object_class_id = Column(Integer, primary_key=True)
object_class_code = Column(Text, nullable=False, index=True, unique=True)
object_class_name = Column(Text)
class SF133(Base):
"""Represents GTAS records"""
__tablename__ = "sf_133"
sf133_id = Column(Integer, primary_key=True)
agency_identifier = Column(Text, nullable=False, index=True)
allocation_transfer_agency = Column(Text, index=True)
availability_type_code = Column(Text)
beginning_period_of_availa = Column(Text)
ending_period_of_availabil = Column(Text)
main_account_code = Column(Text, nullable=False)
sub_account_code = Column(Text, nullable=False)
tas = Column(Text, nullable=False, default=concat_tas, index=True)
display_tas = Column(Text, default=concat_display_tas)
fiscal_year = Column(Integer, nullable=False, index=True)
period = Column(Integer, nullable=False, index=True)
line = Column(Integer, nullable=False)
amount = Column(Numeric, nullable=False, default=0, server_default="0")
account_num = Column(Integer, nullable=True)
disaster_emergency_fund_code = Column(Text, index=True)
Index("ix_sf_133_tas_group",
SF133.tas,
SF133.fiscal_year,
SF133.period,
SF133.line,
SF133.disaster_emergency_fund_code,
unique=True)
class TASFailedEdits(Base):
__tablename__ = "tas_failed_edits"
tas_failed_edits_id = Column(Integer, primary_key=True)
agency_identifier = Column(Text)
allocation_transfer_agency = Column(Text)
availability_type_code = Column(Text)
beginning_period_of_availa = Column(Text)
ending_period_of_availabil = Column(Text)
main_account_code = Column(Text)
sub_account_code = Column(Text)
tas = Column(Text, nullable=False, default=concat_tas, index=True)
display_tas = Column(Text, nullable=False, default=concat_display_tas, index=True)
fiscal_year = Column(Integer, nullable=False, index=True)
period = Column(Integer, nullable=False, index=True)
fr_entity_type = Column(Text)
fr_entity_description = Column(Text)
edit_number = Column(Text)
edit_id = Column(Text)
severity = Column(Text)
atb_submission_status = Column(Text)
approved_override_exists = Column(Boolean)
class ProgramActivity(Base):
__tablename__ = "program_activity"
program_activity_id = Column(Integer, primary_key=True)
fiscal_year_period = Column(Text, nullable=False, index=True)
agency_id = Column(Text, nullable=False, index=True)
allocation_transfer_id = Column(Text)
account_number = Column(Text, nullable=False, index=True)
program_activity_code = Column(Text, nullable=False, index=True)
program_activity_name = Column(Text, nullable=False, index=True)
Index("ix_pa_tas_pa",
ProgramActivity.fiscal_year_period,
ProgramActivity.agency_id,
ProgramActivity.allocation_transfer_id,
ProgramActivity.account_number,
ProgramActivity.program_activity_code,
ProgramActivity.program_activity_name,
unique=True)
class CountryCode(Base):
__tablename__ = "country_code"
country_code_id = Column(Integer, primary_key=True)
country_code = Column(Text, nullable=False, index=True, unique=True)
country_code_2_char = Column(Text, index=True)
country_name = Column(Text, nullable=False)
territory_free_state = Column(Boolean, nullable=False, default=False, server_default="False")
class SAMRecipient(Base):
""" DUNS Records """
__tablename__ = "sam_recipient"
sam_recipient_id = Column(Integer, primary_key=True)
uei = Column(Text, index=True)
awardee_or_recipient_uniqu = Column(Text, index=True)
legal_business_name = Column(Text)
dba_name = Column(Text)
activation_date = Column(Date, index=True)
deactivation_date = Column(Date, index=True)
registration_date = Column(Date, index=True)
expiration_date = Column(Date, index=True)
last_sam_mod_date = Column(Date)
address_line_1 = Column(Text)
address_line_2 = Column(Text)
city = Column(Text)
state = Column(Text)
zip = Column(Text)
zip4 = Column(Text)
country_code = Column(Text)
congressional_district = Column(Text)
entity_structure = Column(Text)
business_types_codes = Column(ARRAY(Text))
business_types = Column(ARRAY(Text))
ultimate_parent_uei = Column(Text)
ultimate_parent_unique_ide = Column(Text)
ultimate_parent_legal_enti = Column(Text)
high_comp_officer1_full_na = Column(Text)
high_comp_officer1_amount = Column(Text)
high_comp_officer2_full_na = Column(Text)
high_comp_officer2_amount = Column(Text)
high_comp_officer3_full_na = Column(Text)
high_comp_officer3_amount = Column(Text)
high_comp_officer4_full_na = Column(Text)
high_comp_officer4_amount = Column(Text)
high_comp_officer5_full_na = Column(Text)
high_comp_officer5_amount = Column(Text)
last_exec_comp_mod_date = Column(Date)
historic = Column(Boolean, default=False, server_default="False")
Index("ix_sam_recipient_uei_upper", sa.func.upper(SAMRecipient.uei))
class HistoricDUNS(Base):
""" Legacy DUNS Records with their latest updates """
__tablename__ = "historic_duns"
duns_id = Column(Integer, primary_key=True)
awardee_or_recipient_uniqu = Column(Text, index=True)
uei = Column(Text, index=True)
legal_business_name = Column(Text)
dba_name = Column(Text)
entity_structure = Column(Text)
activation_date = Column(Date)
registration_date = Column(Date)
expiration_date = Column(Date)
last_sam_mod_date = Column(Date)
address_line_1 = Column(Text)
address_line_2 = Column(Text)
city = Column(Text)
state = Column(Text)
zip = Column(Text)
zip4 = Column(Text)
country_code = Column(Text)
congressional_district = Column(Text)
business_types_codes = Column(ARRAY(Text))
business_types = Column(ARRAY(Text))
ultimate_parent_unique_ide = Column(Text)
ultimate_parent_uei = Column(Text)
ultimate_parent_legal_enti = Column(Text)
high_comp_officer1_full_na = Column(Text)
high_comp_officer1_amount = Column(Text)
high_comp_officer2_full_na = Column(Text)
high_comp_officer2_amount = Column(Text)
high_comp_officer3_full_na = Column(Text)
high_comp_officer3_amount = Column(Text)
high_comp_officer4_full_na = Column(Text)
high_comp_officer4_amount = Column(Text)
high_comp_officer5_full_na = Column(Text)
high_comp_officer5_amount = Column(Text)
Index("ix_historic_duns_uei_upper", sa.func.upper(HistoricDUNS.uei))
class CFDAProgram(Base):
__tablename__ = "cfda_program"
cfda_program_id = Column(Integer, primary_key=True)
program_number = Column(Float, nullable=False, index=True, unique=True)
program_title = Column(Text)
popular_name = Column(Text)
federal_agency = Column(Text)
authorization = Column(Text)
objectives = Column(Text)
types_of_assistance = Column(Text)
uses_and_use_restrictions = Column(Text)
applicant_eligibility = Column(Text)
beneficiary_eligibility = Column(Text)
credentials_documentation = Column(Text)
preapplication_coordination = Column(Text)
application_procedures = Column(Text)
award_procedure = Column(Text)
deadlines = Column(Text)
range_of_approval_disapproval_time = Column(Text)
website_address = Column(Text)
formula_and_matching_requirements = Column(Text)
length_and_time_phasing_of_assistance = Column(Text)
reports = Column(Text)
audits = Column(Text)
records = Column(Text)
account_identification = Column(Text)
obligations = Column(Text)
range_and_average_of_financial_assistance = Column(Text)
appeals = Column(Text)
renewals = Column(Text)
program_accomplishments = Column(Text)
regulations_guidelines_and_literature = Column(Text)
regional_or_local_office = Column(Text)
headquarters_office = Column(Text)
related_programs = Column(Text)
examples_of_funded_projects = Column(Text)
criteria_for_selecting_proposals = Column(Text)
url = Column(Text)
recovery = Column(Text)
omb_agency_code = Column(Text)
omb_bureau_code = Column(Text)
published_date = Column(Text, index=True)
archived_date = Column(Text, index=True)
class Zips(Base):
""" Zip and other address data for validation """
__tablename__ = "zips"
zips_id = Column(Integer, primary_key=True)
zip5 = Column(Text, index=True)
zip_last4 = Column(Text, index=True)
state_abbreviation = Column(Text, index=True)
county_number = Column(Text, index=True)
congressional_district_no = Column(Text, index=True)
__table_args__ = (UniqueConstraint('zip5', 'zip_last4', name='uniq_zip5_zip_last4'),)
Index("ix_zips_zip5_state_abbreviation_county_number",
Zips.zip5,
Zips.state_abbreviation,
Zips.county_number,
unique=False)
class ZipsGrouped(Base):
""" Zip and other address data without the final 4 digits for derivation """
__tablename__ = "zips_grouped"
zips_grouped_id = Column(Integer, primary_key=True)
zip5 = Column(Text, index=True)
state_abbreviation = Column(Text)
county_number = Column(Text)
congressional_district_no = Column(Text)
class CityCode(Base):
""" City code data and other useful, identifying location data """
__tablename__ = "city_code"
city_code_id = Column(Integer, primary_key=True)
feature_name = Column(Text)
feature_class = Column(Text)
city_code = Column(Text, index=True)
state_code = Column(Text, index=True)
county_number = Column(Text)
county_name = Column(Text)
latitude = Column(Text)
longitude = Column(Text)
class CountyCode(Base):
""" County code data per state """
__tablename__ = "county_code"
county_code_id = Column(Integer, primary_key=True)
county_number = Column(Text, index=True)
county_name = Column(Text)
state_code = Column(Text, index=True)
class States(Base):
""" State abbreviations and names """
__tablename__ = "states"
states_id = Column(Integer, primary_key=True)
state_code = Column(Text, index=True)
state_name = Column(Text)
fips_code = Column(Text)
class ZipCity(Base):
""" zip-5 to city name mapping """
__tablename__ = "zip_city"
zip_city_id = Column(Integer, primary_key=True)
zip_code = Column(Text)
city_name = Column(Text)
class StateCongressional(Base):
""" state to congressional district mapping """
__tablename__ = "state_congressional"
state_congressional_id = Column(Integer, primary_key=True)
state_code = Column(Text, index=True)
congressional_district_no = Column(Text, index=True)
census_year = Column(Integer, index=True)
Index("ix_sc_state_cd",
StateCongressional.state_code,
StateCongressional.congressional_district_no,
unique=True)
class DEFC(Base):
__tablename__ = "defc"
defc_id = Column(Integer, primary_key=True)
code = Column(Text, nullable=False, index=True, unique=True)
group = Column(Text)
class ExternalDataType(Base):
""" external data type mapping """
__tablename__ = "external_data_type"
external_data_type_id = Column(Integer, primary_key=True)
name = Column(Text)
description = Column(Text)
class ExternalDataLoadDate(Base):
""" data load dates corresponding to external data types """
__tablename__ = "external_data_load_date"
external_data_load_date_id = Column(Integer, primary_key=True)
last_load_date_start = Column(DateTime)
last_load_date_end = Column(DateTime)
external_data_type_id = Column(Integer, ForeignKey("external_data_type.external_data_type_id",
name="fk_external_data_type_id"), unique=True)
external_data_type = relationship("ExternalDataType", uselist=False)
|
cc0-1.0
|
47f665049ef73b5cafc24c1e2b56bb01
| 37.940375
| 119
| 0.682999
| 3.380861
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_sf133_content.py
|
1
|
14733
|
import re
import os
from dataactcore.config import CONFIG_BROKER
from dataactvalidator.scripts import load_sf133
SF_RE = re.compile(r'sf_133_(?P<year>\d{4})_(?P<period>\d{2})\.csv')
line_sums = {}
def sum_range(tas, start, end, target, join_array, failed_validations, tas_str, extra_line=None):
""" Rule: sum of lines [start] through [end] = [target] """
lines_start_to_end = tas[tas.line.isin(list(map(str, range(start, end + 1))))]
line_target = tas[tas.line == str(target)]
sum_lines_start_to_end = float("{0:.2f}".format(lines_start_to_end['amount'].astype(float).sum()))
if extra_line:
line = tas[tas.line.isin([str(extra_line)])]
line_amount = float("{0:.2f}".format(line['amount'].astype(float).sum()))
sum_lines_start_to_end += line_amount
line_target_amount = float("{0:.2f}".format(line_target['amount'].astype(float).sum()))
if line_target_amount != sum_lines_start_to_end:
error_message = 'Sum of lines {start} through {end} != {target}'.format(start=start, end=end, target=target)
if extra_line:
error_message = 'Sum of lines {start} through {end} + {extra_line} != {target}'.\
format(start=start, end=end, extra_line=extra_line, target=target)
failed_validations.append(','.join(join_array + [error_message, "{0:.2f}".format(line_target_amount),
"{0:.2f}".format(sum_lines_start_to_end)]))
sum_key = "sum_lines_{start}_through_{end}".format(start=start, end=end)
sum_dict = {sum_key: sum_lines_start_to_end, target: line_target_amount}
if tas_str not in line_sums:
line_sums[tas_str] = sum_dict
else:
line_sums[tas_str].update(sum_dict)
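# Hedged illustration (not part of the original test): a minimal sketch of how sum_range()
# reports a mismatch, using a tiny hand-built frame. pandas is imported locally because the
# real test only receives frames from load_sf133.clean_sf133_data; the TAS values below are
# invented for demonstration only.
def _example_sum_range_mismatch():
    import pandas as pd
    demo_tas = pd.DataFrame({'line': ['1000', '1042', '1050'],
                             'amount': ['10.00', '5.00', '14.00']})  # 10 + 5 != 14
    failures = []
    sum_range(tas=demo_tas, start=1000, end=1042, target=1050,
              join_array=['demo_file', '097', '000', ' ', '2019', '2020', '0100', '000'],
              failed_validations=failures, tas_str='demo_example')
    return failures  # one entry ending in 'Sum of lines 1000 through 1042 != 1050,14.00,15.00'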
def sum_list(tas_str, line_list):
line_amount_list = [line_sums[tas_str][key] for key in line_list if key in line_sums[tas_str]]
return "{:.2f}".format(sum(line_amount_list))
def test_sf133_files(database):
"""Test sums of all TAS's in any unloaded SF-133 files"""
failed_validations = ['file,aid,ata,availability_type_code,bpoa,epoa,main_account,sub_account,'
'error_type,value1,value2']
# get a list of SF 133 files to test
sf133_list = load_sf133.get_sf133_list(os.path.join(CONFIG_BROKER['path'], 'dataactvalidator', 'config',
'to_validate'))
# test data in each SF 133 file
for sf133 in sf133_list:
# skipping files with invalid name
file_match = SF_RE.match(sf133.file)
if file_match:
file_index = "{0}_{1}".format(file_match.group('year'), file_match.group('period'))
data = load_sf133.clean_sf133_data(sf133.full_file, None)
# sort data by unique TAS
data.sort_values(by=['tas'], inplace=True)
data.set_index(keys=['tas'], drop=False, inplace=True)
# iterate through data by TAS
for _, tas in data.groupby(['tas']):
current_tas = tas.iloc[0]
tas_list = [current_tas['agency_identifier'], current_tas['allocation_transfer_agency'],
current_tas['availability_type_code'], current_tas['beginning_period_of_availa'],
current_tas['ending_period_of_availabil'], current_tas['main_account_code'],
current_tas['sub_account_code']]
join_array = [file_index] + tas_list
tas_str = ''.join(tas_list)
# Rule: sum of lines 1000 through 1042 = 1050
sum_range(tas=tas, start=1000, end=1042, target=1050, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Rule: sum of lines 1100 through 1153 = 1160
sum_range(tas=tas, start=1100, end=1153, target=1160, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Rule: sum of lines 1170 through 1176 = 1180
sum_range(tas=tas, start=1170, end=1176, target=1180, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Rule: sum of lines 1200 through 1252 = 1260
sum_range(tas=tas, start=1200, end=1252, target=1260, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Rule: sum of lines 1270 through 1276 = 1280
sum_range(tas=tas, start=1270, end=1276, target=1280, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Rule: sum of lines 1300 through 1330 = 1340
sum_range(tas=tas, start=1300, end=1330, target=1340, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Rule: sum of lines 1400 through 1430 = 1440
sum_range(tas=tas, start=1400, end=1430, target=1440, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Rule: sum of lines 1500 through 1531 = 1540
sum_range(tas=tas, start=1500, end=1531, target=1540, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Rule: sum of lines 1600 through 1631 = 1640
sum_range(tas=tas, start=1600, end=1631, target=1640, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Rule: sum of lines 1700 through 1742 = 1750
sum_range(tas=tas, start=1700, end=1742, target=1750, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Rule: sum of lines 1800 through 1842 = 1850
sum_range(tas=tas, start=1800, end=1842, target=1850, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
line_1900_amount = "{:.2f}".format(tas[tas.line == '1900'].amount.astype(float).sum())
line_1910_amount = "{:.2f}".format(tas[tas.line == '1910'].amount.astype(float).sum())
# Rule: 1160 + 1180 + 1260 + 1280 + 1340 + 1440 + 1540 + 1640 + 1750 + 1850 = 1900
line_list = [1160, 1180, 1260, 1280, 1340, 1440, 1540, 1640, 1750, 1850]
line_amount_list_sum = sum_list(tas_str, line_list)
if line_1900_amount != line_amount_list_sum:
failed_validations.append(','.join(join_array + ['1160 + 1180 + 1260 + 1280 + 1340 + 1440 + 1540 + '
'1640 + 1750 + 1850 != 1900',
line_1900_amount,
line_amount_list_sum]))
# Rule: 1050 + 1900 = 1910
line_amount_list_sum = "{:.2f}".format(float(sum_list(tas_str, [1050])) + float(line_1900_amount))
if line_1910_amount != line_amount_list_sum:
failed_validations.append(','.join(join_array + ['1050 + 1900 != 1910',
line_1910_amount,
line_amount_list_sum]))
# Rule: sum of lines 1100 through 1153 +sum of lines 1170 through 1176 +sum of lines 1200 through 1252
# +sum of lines 1270 through 1276 +sum of lines 1300 through 1330 +sum of lines 1400 through 1430 +sum
# of lines 1500 through 1531 +sum of lines 1600 through 1631 +sum of lines 1700 through 1742 +sum of
# lines 1800 through 1842 = 1900
key_list = ['sum_lines_1100_through_1153', 'sum_lines_1170_through_1176', 'sum_lines_1200_through_1252',
'sum_lines_1270_through_1276', 'sum_lines_1300_through_1330', 'sum_lines_1400_through_1430',
'sum_lines_1500_through_1531', 'sum_lines_1600_through_1631', 'sum_lines_1700_through_1742',
'sum_lines_1800_through_1842']
key_list_sum = sum_list(tas_str, key_list)
if line_1900_amount != key_list_sum:
failed_validations.append(','.join(join_array + ['Sum of the sum of lines != 1900',
line_1900_amount,
key_list_sum]))
# Rule: sum of lines 1000 through 1042 +sum of lines 1100 through 1153 +sum of lines 1170 through 1176
# +sum of lines 1200 through 1252 +sum of lines 1270 through 1276 +sum of lines 1300 through 1330 +sum
# of lines 1400 through 1430 +sum of lines 1500 through 1531 +sum of lines 1600 through 1631 +sum of
# lines 1700 through 1742 +sum of lines 1800 through 1842 = 1910
key_list = ['sum_lines_1000_through_1042', 'sum_lines_1100_through_1153', 'sum_lines_1170_through_1176',
'sum_lines_1200_through_1252', 'sum_lines_1270_through_1276', 'sum_lines_1300_through_1330',
'sum_lines_1400_through_1430', 'sum_lines_1500_through_1531', 'sum_lines_1600_through_1631',
'sum_lines_1700_through_1742', 'sum_lines_1800_through_1842']
key_list_sum = sum_list(tas_str, key_list)
if line_1910_amount != key_list_sum:
failed_validations.append(','.join(join_array + ['Sum of the sum of lines != 1910',
line_1910_amount,
key_list_sum]))
# Turning this rule off until it is deemed necessary
#
# # Rule: sum of lines 2001 through 2003 = 2004
# sum_range(tas=tas, start=2001, end=2003, target=2004, join_array=join_array,
# failed_validations=failed_validations, tas_str=tas_str)
#
# # Rule: sum of lines 2101 through 2103 = 2104
# sum_range(tas=tas, start=2101, end=2103, target=2104, join_array=join_array,
# failed_validations=failed_validations, tas_str=tas_str)
# Rule: 2004 + 2104 = 2190
line_2004 = tas[tas.line.isin(['2004'])]
line_2004_amount = float("{0:.2f}".format(line_2004['amount'].astype(float).sum()))
line_2104 = tas[tas.line.isin(['2104'])]
line_2104_amount = float("{0:.2f}".format(line_2104['amount'].astype(float).sum()))
line_2190_amount = "{:.2f}".format(tas[tas.line == '2190'].amount.astype(float).sum())
line_amount_sum = "{:.2f}".format(line_2004_amount + line_2104_amount)
if line_2190_amount != line_amount_sum:
failed_validations.append(','.join(join_array + ['2004 + 2104 != 2190',
line_2190_amount,
line_amount_sum]))
# Rule: 2170 + 2180 = 2190
line_2170_amount = "{:.2f}".format(tas[tas.line == '2170'].amount.astype(float).sum())
line_2180_amount = "{:.2f}".format(tas[tas.line == '2180'].amount.astype(float).sum())
line_amount_sum = "{:.2f}".format(float(line_2170_amount) + float(line_2180_amount))
if line_2190_amount != line_amount_sum:
failed_validations.append(','.join(join_array + ['2170 + 2180 != 2190',
line_2190_amount,
line_amount_sum]))
# Rule: 2201 + 2202 + 2203 + 2301 + 2302 + 2303 + 2401 + 2402 + 2403 + 2413 = 2490
line_2490_amount = "{:.2f}".format(tas[tas.line == '2490'].amount.astype(float).sum())
lines_2201_to_2203 = tas[tas.line.isin(list(map(str, range(2201, 2204))))]
sum_lines = float("{0:.2f}".format(lines_2201_to_2203['amount'].astype(float).sum()))
lines_2301_to_2303 = tas[tas.line.isin(list(map(str, range(2301, 2304))))]
sum_lines += float("{0:.2f}".format(lines_2301_to_2303['amount'].astype(float).sum()))
lines_2401_to_2403 = tas[tas.line.isin(list(map(str, range(2401, 2404))))]
sum_lines += float("{0:.2f}".format(lines_2401_to_2403['amount'].astype(float).sum()))
line_2413 = tas[tas.line.isin(['2413'])]
line_2413_amount = float("{0:.2f}".format(line_2413['amount'].astype(float).sum()))
sum_lines += line_2413_amount
sum_lines = "{:.2f}".format(sum_lines)
if line_2490_amount != sum_lines:
failed_validations.append(','.join(join_array + ['2201 + 2202 + 2203 + 2301 + 2302 + 2303 + 2401 + '
'2402 + 2403 + 2413 != 2490', line_2490_amount,
sum_lines]))
# Rule: 2412 + 2413 = 2490
sum_range(tas=tas, start=2412, end=2413, target=2490, join_array=join_array,
failed_validations=failed_validations, tas_str=tas_str)
# Turning this rule off until it is deemed necessary
#
# # Rule: (sum of lines 2001 through 2403) + 2413 = 2500
# sum_range(tas=tas, start=2001, end=2403, target=2500, join_array=join_array,
# failed_validations=failed_validations, tas_str=tas_str, extra_line=2413)
# Rule: 1910 = 2500
line_amount = "{:.2f}".format(tas[tas.line == '2500'].amount.astype(float).sum())
if line_1910_amount != line_amount:
failed_validations.append(','.join(join_array + ['1910 != 2500',
line_1910_amount,
line_amount]))
assert len(failed_validations) == 1, "\n".join(str(failure) for failure in failed_validations)
|
cc0-1.0
|
96a0eba7670c660f00867b6c5eb3fe23
| 59.381148
| 120
| 0.524265
| 3.686015
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/bab396e50b1f_create_application_type_table_and_.py
|
1
|
1549
|
"""create application_type table and update submission window
Revision ID: bab396e50b1f
Revises: 0ca75433c435
Create Date: 2017-08-03 14:45:39.186161
"""
# revision identifiers, used by Alembic.
revision = 'bab396e50b1f'
down_revision = '0ca75433c435'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('application_type',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('application_type_id', sa.Integer(), nullable=False),
sa.Column('application_name', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('application_type_id')
)
op.add_column('submission_window', sa.Column('application_type_id', sa.Integer(), nullable=True))
op.create_foreign_key('fk_submission_window_application', 'submission_window', 'application_type', ['application_type_id'], ['application_type_id'])
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('fk_submission_window_application', 'submission_window', type_='foreignkey')
op.drop_column('submission_window', 'application_type_id')
op.drop_table('application_type')
### end Alembic commands ###
|
cc0-1.0
|
1cca37d49c5f3f238a1f44ff6871a522
| 29.98
| 152
| 0.701743
| 3.569124
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/06d5bc68c29a_making_historicduns_table.py
|
1
|
2336
|
"""Making HistoricDuns table
Revision ID: 06d5bc68c29a
Revises: 653d47c65df8
Create Date: 2019-07-24 20:46:56.121706
"""
# revision identifiers, used by Alembic.
revision = '06d5bc68c29a'
down_revision = '653d47c65df8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('historic_duns',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('duns_id', sa.Integer(), nullable=False),
sa.Column('awardee_or_recipient_uniqu', sa.Text(), nullable=True),
sa.Column('legal_business_name', sa.Text(), nullable=True),
sa.Column('dba_name', sa.Text(), nullable=True),
sa.Column('activation_date', sa.Date(), nullable=True),
sa.Column('registration_date', sa.Date(), nullable=True),
sa.Column('expiration_date', sa.Date(), nullable=True),
sa.Column('last_sam_mod_date', sa.Date(), nullable=True),
sa.Column('address_line_1', sa.Text(), nullable=True),
sa.Column('address_line_2', sa.Text(), nullable=True),
sa.Column('city', sa.Text(), nullable=True),
sa.Column('state', sa.Text(), nullable=True),
sa.Column('zip', sa.Text(), nullable=True),
sa.Column('zip4', sa.Text(), nullable=True),
sa.Column('country_code', sa.Text(), nullable=True),
sa.Column('congressional_district', sa.Text(), nullable=True),
sa.Column('business_types_codes', sa.ARRAY(sa.Text()), nullable=True),
sa.Column('ultimate_parent_unique_ide', sa.Text(), nullable=True),
sa.Column('ultimate_parent_legal_enti', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('duns_id')
)
op.create_index(op.f('ix_historic_duns_awardee_or_recipient_uniqu'), 'historic_duns', ['awardee_or_recipient_uniqu'], unique=False)
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_historic_duns_awardee_or_recipient_uniqu'), table_name='historic_duns')
op.drop_table('historic_duns')
# ### end Alembic commands ###
|
cc0-1.0
|
6981916847a50e1e3e3354ace1b1f2c8
| 34.938462
| 135
| 0.672517
| 3.073684
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/0cf297fa927c_adding_unique_transaction_keys_to_.py
|
1
|
1146
|
"""Adding unique transaction keys to staging models
Revision ID: 0cf297fa927c
Revises: 94efce2e7882
Create Date: 2019-03-21 17:14:34.938006
"""
# revision identifiers, used by Alembic.
revision = '0cf297fa927c'
down_revision = '94efce2e7882'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('award_financial_assistance', sa.Column('afa_generated_unique', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('detached_award_proc_unique', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('award_procurement', 'detached_award_proc_unique')
op.drop_column('award_financial_assistance', 'afa_generated_unique')
# ### end Alembic commands ###
|
cc0-1.0
|
d59185d14b39cea3ea47e6f747761d7e
| 26.285714
| 108
| 0.707679
| 3.350877
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactbroker/helpers/pandas_helper.py
|
1
|
2685
|
import pandas as pd
import logging
from dataactcore.interfaces.db import GlobalDB
logger = logging.getLogger(__name__)
def check_dataframe_diff(new_data, model, del_cols, sort_cols, lambda_funcs=None):
""" Checks if 2 dataframes (the new data and the existing data for a model) are different.
Args:
new_data: dataframe containing the new data to compare
model: The model to get the existing data from
del_cols: An array containing the columns to delete from the existing data (usually id and foreign keys)
sort_cols: An array containing the columns to sort on
lambda_funcs: An array of tuples (column to update, transformative lambda taking in a row argument)
that will be processed in the order provided.
Returns:
True if there are differences between the two dataframes, false otherwise
"""
if not lambda_funcs:
lambda_funcs = {}
new_data_copy = new_data.copy(deep=True)
# Drop the created_at and updated_at columns from the new data so they don't cause differences
try:
new_data_copy.drop(['created_at', 'updated_at'], axis=1, inplace=True)
except ValueError:
logger.info('created_at or updated_at column not found, drop skipped.')
sess = GlobalDB.db().session
current_data = pd.read_sql_table(model.__table__.name, sess.connection(), coerce_float=False)
# Apply any lambda functions provided to update values if needed
if not current_data.empty:
for col_name, lambda_func in lambda_funcs:
current_data[col_name] = current_data.apply(lambda_func, axis=1)
# Drop the created_at and updated_at for the same reason as above, also drop the pk ID column for this table
try:
current_data.drop(['created_at', 'updated_at'] + del_cols, axis=1, inplace=True)
except ValueError:
logger.info('created_at, updated_at, or at least one of the columns provided for deletion not found,'
' drop skipped.')
# pandas comparison requires everything to be in the same order
new_data_copy.sort_values(by=sort_cols, inplace=True)
current_data.sort_values(by=sort_cols, inplace=True)
# Columns have to be in order too
cols = new_data_copy.columns.tolist()
cols.sort()
new_data_copy = new_data_copy[cols]
cols = current_data.columns.tolist()
cols.sort()
current_data = current_data[cols]
# Reset indexes after sorting, so that they match
new_data_copy.reset_index(drop=True, inplace=True)
current_data.reset_index(drop=True, inplace=True)
return not new_data_copy.equals(current_data)
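# Hedged usage sketch (not part of the original helper): a typical call shape, assuming the
# Zips reference model from dataactcore.models.domainModels with its zips_id surrogate key
# and a stable zip5/zip_last4 sort order.
def _example_zips_changed(new_zips_df):
    from dataactcore.models.domainModels import Zips
    return check_dataframe_diff(new_zips_df, Zips, del_cols=['zips_id'],
                                sort_cols=['zip5', 'zip_last4'])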
|
cc0-1.0
|
1b0fca089649511a883c13727f998efd
| 39.681818
| 116
| 0.676723
| 3.977778
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/interfaces/function_bag.py
|
1
|
30559
|
import logging
from operator import attrgetter
import time
import uuid
from datetime import datetime
from sqlalchemy import func, or_
from sqlalchemy.orm import joinedload
from sqlalchemy.orm.exc import NoResultFound
from dataactcore.aws.s3Handler import S3Handler
from dataactcore.config import CONFIG_BROKER
from dataactcore.models.domainModels import ExternalDataLoadDate
from dataactcore.models.errorModels import ErrorMetadata, File
from dataactcore.models.jobModels import (Job, Submission, JobDependency, PublishHistory, PublishedFilesHistory,
SubmissionWindowSchedule)
from dataactcore.models.stagingModels import FABS
from dataactcore.models.userModel import User, EmailTemplateType, EmailTemplate
from dataactcore.models.validationModels import RuleSeverity
from dataactcore.models.views import SubmissionUpdatedView
from dataactcore.models.lookups import (FILE_TYPE_DICT, FILE_STATUS_DICT, JOB_TYPE_DICT,
JOB_STATUS_DICT, FILE_TYPE_DICT_ID, PUBLISH_STATUS_DICT,
EXTERNAL_DATA_TYPE_DICT)
from dataactcore.interfaces.db import GlobalDB
from dataactvalidator.validation_handlers.validationError import ValidationError
from dataactcore.aws.sqsHandler import sqs_queue
# This is a holding place for functions from a previous iteration of broker databases and database access code.
# Work still to do:
# - simplify functions
# - move functions to a better place?
# - replace GlobalDB function, which is deprecated now that db logic is refactored
logger = logging.getLogger(__name__)
# todo: move this value to config if it is decided to keep local user login long term
HASH_ROUNDS = 12
def create_user_with_password(email, password, bcrypt, website_admin=False):
"""Convenience function to set up fully-baked user (used for setup/testing only)."""
sess = GlobalDB.db().session
user = User(
email=email, name='Administrator',
title='System Admin', website_admin=website_admin
)
user.salt, user.password_hash = get_password_hash(password, bcrypt)
sess.add(user)
sess.commit()
return user
def get_password_hash(password, bcrypt):
"""Generate password hash."""
# TODO: handle password hashing/lookup in the User model
salt = uuid.uuid4().hex
# HASH_ROUNDS (12) is the number of rounds used for bcrypt hashing
encoded_hash = bcrypt.generate_password_hash(password + salt, HASH_ROUNDS)
password_hash = encoded_hash.decode('utf-8')
return salt, password_hash
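# Hedged illustration (not part of the original module): a throwaway round trip showing how
# the salt and hash produced above are verified, mirroring check_correct_password() further
# down. 'bcrypt' is assumed to be a Flask-Bcrypt instance, as in create_user_with_password.
def _example_password_round_trip(bcrypt):
    salt, password_hash = get_password_hash('example-password', bcrypt)
    return bcrypt.check_password_hash(password_hash, 'example-password' + salt)  # True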
def populate_job_error_info(job):
""" Set number of errors and warnings for specified job. """
sess = GlobalDB.db().session
job.number_of_errors = sess.query(func.sum(ErrorMetadata.occurrences)).\
join(ErrorMetadata.severity).\
filter(ErrorMetadata.job_id == job.job_id, RuleSeverity.name == 'fatal').\
scalar() or 0
job.number_of_warnings = sess.query(func.sum(ErrorMetadata.occurrences)).\
join(ErrorMetadata.severity).\
filter(ErrorMetadata.job_id == job.job_id, RuleSeverity.name == 'warning').\
scalar() or 0
sess.commit()
def sum_number_of_errors_for_job_list(submission_id, error_type='fatal'):
"""Add number of errors for all jobs in list."""
sess = GlobalDB.db().session
error_sum = 0
jobs = sess.query(Job).filter(Job.submission_id == submission_id).all()
for job in jobs:
if error_type == 'fatal':
error_sum += job.number_of_errors
elif error_type == 'warning':
error_sum += job.number_of_warnings
return error_sum
""" ERROR DB FUNCTIONS """
def get_error_type(job_id):
""" Returns either "none", "header_errors", or "row_errors" depending on what errors occurred during validation """
sess = GlobalDB.db().session
file_status_name = sess.query(File).options(joinedload('file_status')).\
filter(File.job_id == job_id).one().file_status.name
if file_status_name == 'header_error':
# Header errors occurred, return that
return 'header_errors'
elif sess.query(Job).filter(Job.job_id == job_id).one().number_of_errors > 0:
# Row errors occurred
return 'row_errors'
else:
# No errors occurred during validation
return 'none'
def create_file_if_needed(job_id, filename=None):
""" Return the existing file object if it exists, or create a new one """
sess = GlobalDB.db().session
try:
file_rec = sess.query(File).filter(File.job_id == job_id).one()
# Set new filename for changes to an existing submission
file_rec.filename = filename
except NoResultFound:
file_rec = create_file(job_id, filename)
return file_rec
def create_file(job_id, filename):
""" Create a new file object for specified job and filename """
sess = GlobalDB.db().session
try:
int(job_id)
except Exception:
logger.error({
'message': 'Bad job_id: {}'.format(job_id),
'message_type': 'CoreError',
'job_id': job_id,
'function': 'create_file'
})
raise ValueError('Bad job_id: {}'.format(job_id))
file_rec = File(job_id=job_id, filename=filename, file_status_id=FILE_STATUS_DICT['incomplete'])
sess.add(file_rec)
sess.commit()
return file_rec
def write_file_error(job_id, filename, error_type, extra_info=None):
""" Write a file-level error to the file table
Args:
job_id: ID of job in job tracker
filename: name of error report in S3
error_type: type of error, value will be mapped to ValidationError class
extra_info: list of extra information to be included in file
"""
sess = GlobalDB.db().session
try:
int(job_id)
except Exception:
logger.error({
'message': 'Bad job_id: {}'.format(job_id),
'message_type': 'CoreError',
'job_id': job_id,
'function': 'write_file_error'
})
raise ValueError('Bad job_id: {}'.format(job_id))
# Get File object for this job ID or create it if it doesn't exist
file_rec = create_file_if_needed(job_id, filename)
# Mark error type and add header info if present
file_rec.file_status_id = FILE_STATUS_DICT[ValidationError.get_error_type_string(error_type)]
if extra_info is not None:
if 'missing_headers' in extra_info:
file_rec.headers_missing = extra_info['missing_headers']
if 'duplicated_headers' in extra_info:
file_rec.headers_duplicated = extra_info['duplicated_headers']
sess.add(file_rec)
sess.commit()
def mark_file_complete(job_id, filename=None):
""" Marks file's status as complete
Args:
job_id: ID of job in job tracker
filename: name of error report in S3
"""
sess = GlobalDB.db().session
file_complete = create_file_if_needed(job_id, filename)
file_complete.file_status_id = FILE_STATUS_DICT['complete']
sess.commit()
def get_error_metrics_by_job_id(job_id, include_file_types=False, severity_id=None):
""" Get error metrics for specified job, including number of errors for each field name and error type """
sess = GlobalDB.db().session
result_list = []
query_result = sess.query(File).options(joinedload('file_status')).filter(File.job_id == job_id).one()
if not query_result.file_status.file_status_id == FILE_STATUS_DICT['complete']:
return [{'field_name': 'File Level Error', 'error_name': query_result.file_status.name,
'error_description': query_result.file_status.description, 'occurrences': 1, 'rule_failed': ''}]
query_result = sess.query(ErrorMetadata).options(joinedload('error_type')).filter(
ErrorMetadata.job_id == job_id, ErrorMetadata.severity_id == severity_id).all()
for result in query_result:
record_dict = {'field_name': result.field_name, 'error_name': result.error_type.name,
'error_description': result.error_type.description, 'occurrences': result.occurrences,
'rule_failed': result.rule_failed, 'original_label': result.original_rule_label}
if include_file_types:
record_dict['source_file'] = FILE_TYPE_DICT_ID.get(result.file_type_id, '')
record_dict['target_file'] = FILE_TYPE_DICT_ID.get(result.target_file_type_id, '')
result_list.append(record_dict)
return result_list
def get_email_template(email_type):
""" Get template for specified email type
Arguments:
email_type - Name of template to get
Returns:
EmailTemplate object
"""
sess = GlobalDB.db().session
type_result = sess.query(EmailTemplateType.email_template_type_id).\
filter(EmailTemplateType.name == email_type).one()
template_result = sess.query(EmailTemplate).\
filter(EmailTemplate.template_type_id == type_result.email_template_type_id).one()
return template_result
def check_correct_password(user, password, bcrypt):
""" Given a user object and a password, verify that the password is correct.
Arguments:
user - User object
password - Password to check
bcrypt - bcrypt to use for password hashing
Returns:
True if valid password, False otherwise.
"""
if password is None or password.strip() == "":
# If no password or empty password, reject
return False
# Check the password with bcrypt
return bcrypt.check_password_hash(user.password_hash, password + user.salt)
def run_job_checks(job_id):
""" Checks that specified job has no unsatisfied prerequisites
Args:
job_id -- job_id of job to be run
Returns:
True if prerequisites are satisfied, False if not
"""
sess = GlobalDB.db().session
# Get count of job's prerequisites that are not yet finished
incomplete_dependencies = sess.query(JobDependency). \
join('prerequisite_job'). \
filter(JobDependency.job_id == job_id, Job.job_status_id != JOB_STATUS_DICT['finished']). \
count()
if incomplete_dependencies:
return False
else:
return True
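# Hedged usage sketch (not part of the original module): the gating pattern run_job_checks()
# supports, only advancing a job once every prerequisite has finished. The 'running' status
# mirrors the values used elsewhere in this module.
def _example_start_job_if_ready(job_id):
    if run_job_checks(job_id):
        mark_job_status(job_id, 'running')
        return True
    return False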
def mark_job_status(job_id, status_name, skip_check=False):
"""
Mark job as having specified status.
Jobs being marked as finished will add dependent jobs to queue.
Args:
job_id: ID for job being marked
status_name: Status to change job to
"""
sess = GlobalDB.db().session
job = sess.query(Job).filter(Job.job_id == job_id).one()
old_status = job.job_status.name
# update job status
job.job_status_id = JOB_STATUS_DICT[status_name]
if status_name in ('ready', 'waiting'):
job.error_message = None
sess.commit()
# if status is changed to finished for the first time, check dependencies
# and add to the job queue as necessary
if old_status != 'finished' and status_name == 'finished' and not skip_check:
check_job_dependencies(job_id)
def check_job_dependencies(job_id):
""" For specified job, check which of its dependencies are ready to be started and add them to the queue
Args:
job_id: the ID of the job that was just finished
Raises:
ValueError: If the job provided is not finished
"""
sess = GlobalDB.db().session
log_data = {
'message_type': 'CoreError',
'job_id': job_id
}
# raise exception if current job is not actually finished
job = sess.query(Job).filter(Job.job_id == job_id).one()
if job.job_status_id != JOB_STATUS_DICT['finished']:
log_data['message'] = 'Current job not finished, unable to check dependencies'
logger.error(log_data)
raise ValueError('Current job not finished, unable to check dependencies')
# get the jobs that are dependent on job_id being finished
dependencies = sess.query(JobDependency).filter_by(prerequisite_id=job_id).all()
for dependency in dependencies:
dep_job_id = dependency.job_id
if dependency.dependent_job.job_status_id != JOB_STATUS_DICT['waiting']:
log_data['message_type'] = 'CoreError'
log_data['message'] = '{} (dependency of {}) is not in a \'waiting\' state'.format(dep_job_id, job_id)
logger.error(log_data)
else:
# find the number of this job's prerequisites that do not have a status of 'finished' or have errors.
unfinished_prerequisites = sess.query(JobDependency).\
join(Job, JobDependency.prerequisite_job).\
filter(or_(Job.job_status_id != JOB_STATUS_DICT['finished'], Job.number_of_errors > 0),
JobDependency.job_id == dep_job_id).\
count()
if unfinished_prerequisites == 0:
# this job has no unfinished prerequisite jobs, so it is eligible to be set to a 'ready' status and
# added to the queue
mark_job_status(dep_job_id, 'ready')
# Only want to send validation jobs to the queue, other job types should be forwarded
if dependency.dependent_job.job_type_name in ['csv_record_validation', 'validation']:
# add dep_job_id to the SQS job queue
log_data['message_type'] = 'CoreInfo'
log_data['message'] = 'Sending job {} to job manager in sqs'.format(dep_job_id)
logger.info(log_data)
queue = sqs_queue()
response = queue.send_message(MessageBody=str(dep_job_id))
log_data['message'] = 'Send message response: {}'.format(response)
logger.info(log_data)
def create_jobs(upload_files, submission, existing_submission=False):
"""Create the set of jobs associated with the specified submission
Arguments:
upload_files -- list of named tuples that describe files uploaded to the broker
submission -- submission
existing_submission -- true if we should update jobs in an existing submission rather than creating new jobs
Returns:
Dictionary of upload ids by filename to return to client, used for calling finalize_submission route
"""
sess = GlobalDB.db().session
submission_id = submission.submission_id
# create the file upload and single-file validation jobs and
# set up the dependencies between them
# before starting, sort the incoming list of jobs by letter
# to ensure that jobs dependent on the awards jobs being present
# are processed last.
jobs_required = []
upload_dict = {}
sorted_uploads = sorted(upload_files, key=attrgetter('file_letter'))
for upload_file in sorted_uploads:
validation_job_id, upload_job_id = add_jobs_for_uploaded_file(upload_file, submission_id, existing_submission)
if validation_job_id:
jobs_required.append(validation_job_id)
upload_dict[upload_file.file_type] = upload_job_id
# once single-file upload/validation jobs are created, create the cross-file
# validation job and dependencies
if existing_submission and not submission.is_fabs:
# find cross-file jobs and mark them as waiting
# (note: job_type of 'validation' is a cross-file job)
val_job = sess.query(Job).\
filter_by(
submission_id=submission_id,
job_type_id=JOB_TYPE_DICT['validation']).\
one()
val_job.job_status_id = JOB_STATUS_DICT['waiting']
submission.updated_at = time.strftime('%c')
elif not submission.is_fabs:
# create cross-file validation job
validation_job = Job(
job_status_id=JOB_STATUS_DICT['waiting'],
job_type_id=JOB_TYPE_DICT['validation'],
submission_id=submission_id)
sess.add(validation_job)
sess.flush()
# create dependencies for validation jobs
for job_id in jobs_required:
val_dependency = JobDependency(job_id=validation_job.job_id, prerequisite_id=job_id)
sess.add(val_dependency)
sess.commit()
upload_dict['submission_id'] = submission_id
return upload_dict
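# Hedged sketch (not part of the original module): the minimal attribute set create_jobs()
# and add_jobs_for_uploaded_file() read from each upload_files entry. The namedtuple name is
# an assumption for illustration; the broker passes its own structure with these fields.
def _example_upload_file(file_type, file_letter, file_name, upload_name):
    from collections import namedtuple
    ExampleUploadFile = namedtuple('ExampleUploadFile',
                                   ['file_type', 'file_letter', 'file_name', 'upload_name'])
    return ExampleUploadFile(file_type, file_letter, file_name, upload_name)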
def add_jobs_for_uploaded_file(upload_file, submission_id, existing_submission):
""" Add upload and validation jobs for a single filetype
Arguments:
upload_file: UploadFile named tuple
submission_id: submission ID to attach to jobs
existing_submission: true if we should update existing jobs rather than creating new ones
Returns:
the validation job id for this file type (if any)
the upload job id for this file type
"""
sess = GlobalDB.db().session
file_type_id = FILE_TYPE_DICT[upload_file.file_type]
validation_job_id = None
# Create a file upload job or, for an existing submission, modify the
# existing upload job.
if existing_submission:
# mark existing upload job as running
upload_job = sess.query(Job).filter_by(
submission_id=submission_id,
file_type_id=file_type_id,
job_type_id=JOB_TYPE_DICT['file_upload']
).one()
# mark as running and set new file name and path
upload_job.job_status_id = JOB_STATUS_DICT['running']
upload_job.original_filename = upload_file.file_name
upload_job.progress = 0
upload_job.filename = upload_file.upload_name
upload_job.error_message = None
else:
if upload_file.file_type in ['award', 'award_procurement']:
# file generation handled on backend, mark as ready
upload_status = JOB_STATUS_DICT['ready']
elif upload_file.file_type in ['executive_compensation', 'sub_award']:
# these are dependent on file D2 validation
upload_status = JOB_STATUS_DICT['waiting']
else:
# mark as running since frontend should be doing this upload
upload_status = JOB_STATUS_DICT['running']
upload_job = Job(
original_filename=upload_file.file_name,
filename=upload_file.upload_name,
file_type_id=file_type_id,
job_status_id=upload_status,
job_type_id=JOB_TYPE_DICT['file_upload'],
submission_id=submission_id)
sess.add(upload_job)
sess.flush()
if existing_submission:
# if the file's validation job is attached to an existing submission,
# reset its status and delete any validation artifacts (e.g., error metadata) that
# might exist from a previous run.
val_job = sess.query(Job).filter_by(
submission_id=submission_id,
file_type_id=file_type_id,
job_type_id=JOB_TYPE_DICT['csv_record_validation']
).one()
val_job.job_status_id = JOB_STATUS_DICT['waiting']
val_job.original_filename = upload_file.file_name
val_job.filename = upload_file.upload_name
val_job.progress = 0
# reset file size and number of rows to be set during validation of new file
val_job.file_size = None
val_job.number_of_rows = None
        # delete error metadata that might exist from a previous run of this validation job
sess.query(ErrorMetadata).\
filter(ErrorMetadata.job_id == val_job.job_id).\
delete(synchronize_session='fetch')
# delete file error information that might exist from a previous run of this validation job
sess.query(File).filter(File.job_id == val_job.job_id).delete(synchronize_session='fetch')
else:
# create a new record validation job and add dependencies if necessary
if upload_file.file_type == 'executive_compensation':
d1_val_job = sess.query(Job).\
filter(Job.submission_id == submission_id,
Job.file_type_id == FILE_TYPE_DICT['award_procurement'],
Job.job_type_id == JOB_TYPE_DICT['csv_record_validation']).\
one_or_none()
if d1_val_job is None:
logger.error({
'message': 'Cannot create E job without a D1 job',
'message_type': 'CoreError',
'submission_id': submission_id,
'file_type': 'E'
})
raise Exception('Cannot create E job without a D1 job')
# Add dependency on D1 validation job
d1_dependency = JobDependency(job_id=upload_job.job_id, prerequisite_id=d1_val_job.job_id)
sess.add(d1_dependency)
elif upload_file.file_type == 'sub_award':
            # check that the C validation job exists before adding the dependency
c_val_job = sess.query(Job).\
filter(Job.submission_id == submission_id,
Job.file_type_id == FILE_TYPE_DICT['award_financial'],
Job.job_type_id == JOB_TYPE_DICT['csv_record_validation']).\
one_or_none()
if c_val_job is None:
logger.error({
'message': 'Cannot create F job without a C job',
'message_type': 'CoreError',
'submission_id': submission_id,
'file_type': 'F'
})
raise Exception('Cannot create F job without a C job')
# add dependency on C validation job
c_dependency = JobDependency(job_id=upload_job.job_id, prerequisite_id=c_val_job.job_id)
sess.add(c_dependency)
else:
# E and F don't get validation jobs
val_job = Job(
original_filename=upload_file.file_name,
filename=upload_file.upload_name,
file_type_id=file_type_id,
job_status_id=JOB_STATUS_DICT['waiting'],
job_type_id=JOB_TYPE_DICT['csv_record_validation'],
submission_id=submission_id)
sess.add(val_job)
sess.flush()
# add dependency between file upload job and file validation job
upload_dependency = JobDependency(job_id=val_job.job_id, prerequisite_id=upload_job.job_id)
sess.add(upload_dependency)
validation_job_id = val_job.job_id
sess.commit()
return validation_job_id, upload_job.job_id
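# Editor's note (summary comment, not original code): the job graph built above wires each file's
# upload job as a prerequisite of its csv_record_validation job, makes the D1 validation job a
# prerequisite of the E (executive_compensation) upload job and the C validation job a prerequisite
# of the F (sub_award) upload job, and create_jobs() then makes every single-file validation job a
# prerequisite of the cross-file 'validation' job.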
def get_latest_published_date(submission, is_fabs=False):
    """ Return the most recent publish information for a submission
    Arguments:
        submission: the submission to check
        is_fabs: if True, also look up the latest published FABS file record
    Returns:
        a (created_at, filename) tuple when a published FABS file record exists,
        the created_at datetime alone when only publish history exists,
        None when the submission is unpublished, still publishing, or has no publish history
    """
if submission.publish_status_id != PUBLISH_STATUS_DICT['unpublished'] and\
submission.publish_status_id != PUBLISH_STATUS_DICT['publishing']:
sess = GlobalDB.db().session
last_published = sess.query(PublishHistory).filter_by(submission_id=submission.submission_id).\
order_by(PublishHistory.created_at.desc()).first()
published_files = None
if is_fabs:
published_files = sess.query(PublishedFilesHistory).\
filter_by(publish_history_id=last_published.publish_history_id).first()
if last_published and published_files:
return last_published.created_at, published_files.filename
elif last_published:
return last_published.created_at
return None
def get_certification_deadline(submission):
""" Return the certification deadline for the given submission
Arguments:
submission: the submission object to find its end window
Returns:
the datetime of the submission's window end
"""
sess = GlobalDB.db().session
cert_deadline = None
if not submission.is_fabs:
sub_period = submission.reporting_fiscal_period
sub_year = submission.reporting_fiscal_year
sub_window = sess.query(SubmissionWindowSchedule).filter_by(year=sub_year, period=sub_period).\
one_or_none()
cert_deadline = sub_window.certification_deadline.date() if sub_window else None
return cert_deadline
def get_time_period(submission):
""" Return the time period for the given submission
Arguments:
submission: the submission object to find its end window
Returns:
the time period of the submission
"""
if not submission.is_fabs and submission.is_quarter_format:
sub_quarter = submission.reporting_fiscal_period // 3
sub_year = submission.reporting_fiscal_year
time_period = 'FY {} / Q{}'.format(str(sub_year)[2:], sub_quarter)
else:
time_period = submission.reporting_start_date.strftime('%m / %Y') if submission.reporting_start_date else ''
return time_period
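# Editor's note: a hedged, illustrative sketch (not part of the original module) of the two formats
# produced by get_time_period above, using types.SimpleNamespace as a stand-in for a real Submission.
def _example_get_time_period():  # pragma: no cover
    from types import SimpleNamespace
    from datetime import date
    quarterly = SimpleNamespace(is_fabs=False, is_quarter_format=True,
                                reporting_fiscal_year=2021, reporting_fiscal_period=6,
                                reporting_start_date=None)
    assert get_time_period(quarterly) == 'FY 21 / Q2'
    monthly = SimpleNamespace(is_fabs=False, is_quarter_format=False,
                              reporting_fiscal_year=2021, reporting_fiscal_period=4,
                              reporting_start_date=date(2021, 1, 1))
    assert get_time_period(monthly) == '01 / 2021'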
def filename_fyp_sub_format(submission):
""" Wrapper for filename_fyp_format that takes in a submission object (must have the necessary fields)
Arguments:
submission: the submission object to find the time period
Returns:
the submission's time period string for filenames
"""
return filename_fyp_format(submission.reporting_fiscal_year, submission.reporting_fiscal_period,
submission.is_quarter_format)
def filename_fyp_format(fy, period, is_quarter):
""" Return the proper FYP string to be included in the filenames throughout Broker
Arguments:
fy: fiscal year
period: the period
is_quarter: whether it should be based on quarters or periods
Returns:
the time period string for filenames
"""
if is_quarter:
suffix = 'Q{}'.format(int(period) // 3)
elif int(period) > 2:
suffix = 'P{}'.format(str(period).zfill(2))
else:
suffix = 'P01-P02'
return 'FY{}{}'.format(str(fy)[2:], suffix)
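# Editor's note: a hedged, illustrative check (not part of the original module) of the FYP naming
# convention implemented above; the expected strings are derived directly from the branches.
def _example_filename_fyp_format():  # pragma: no cover
    assert filename_fyp_format(2021, 6, True) == 'FY21Q2'         # quarterly: period 6 -> Q2
    assert filename_fyp_format(2021, 6, False) == 'FY21P06'       # monthly: zero-padded period
    assert filename_fyp_format(2021, 2, False) == 'FY21P01-P02'   # periods 1 and 2 are combined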
def get_last_validated_date(submission_id):
""" Return the oldest last validated date for validation jobs """
sess = GlobalDB.db().session
validation_job_types = [JOB_TYPE_DICT['csv_record_validation'], JOB_TYPE_DICT['validation']]
jobs = sess.query(Job).filter(Job.submission_id == submission_id,
Job.job_type_id.in_(validation_job_types)).all()
oldest_date = ''
for job in jobs:
# if any job's last validated doesn't exist, return blank immediately
if not job.last_validated:
return ''
if not oldest_date or job.last_validated < oldest_date:
oldest_date = job.last_validated
# Still need to do a check here in case there aren't any jobs for a submission.
# This is the case for a single unit test
return oldest_date.strftime('%Y-%m-%dT%H:%M:%S') if oldest_date else oldest_date
def get_fabs_meta(submission_id):
"""Return the total rows, valid rows, publish date, and publish file for FABS submissions"""
sess = GlobalDB.db().session
# get row counts from the FABS table
total_rows = sess.query(FABS).filter(FABS.submission_id == submission_id)
valid_rows = total_rows.filter(FABS.is_valid)
# retrieve the published data and file
submission = sess.query(Submission).filter(Submission.submission_id == submission_id).one()
publish_date, published_file = None, None
publish_data = get_latest_published_date(submission, is_fabs=True)
    # get_latest_published_date returns either a (date, filename) tuple or a bare datetime;
    # a datetime is not iterable, so the TypeError below distinguishes the two cases
    try:
iter(publish_data)
except TypeError:
publish_date = publish_data
else:
publish_date, file_path = publish_data
if CONFIG_BROKER['use_aws'] and file_path:
path, file_name = file_path.rsplit('/', 1) # split by last instance of /
published_file = S3Handler().get_signed_url(path=path, file_name=file_name,
bucket_route=CONFIG_BROKER['certified_bucket'],
url_mapping=CONFIG_BROKER['certified_bucket_mapping'])
elif file_path:
published_file = file_path
return {
'valid_rows': valid_rows.count(),
'total_rows': total_rows.count(),
'publish_date': publish_date.strftime('%Y-%m-%dT%H:%M:%S') if publish_date else None,
'published_file': published_file
}
def get_action_dates(submission_id):
""" Pull the earliest/latest action dates from the FABS table
Args:
submission_id: submission ID pull action dates from
Returns:
the earliest action date (str) or None if not found
the latest action date (str) or None if not found
"""
sess = GlobalDB.db().session
return sess.query(func.min(FABS.action_date).label('min_action_date'),
func.max(FABS.action_date).label('max_action_date'))\
.filter(FABS.submission_id == submission_id,
FABS.is_valid.is_(True)).one()
def get_last_modified(submission_id):
""" Get the last modified date for a submission
Args:
submission_id: submission ID to get the last modified for
Returns:
the last modified date of the provided submission or None if the submission doesn't exist
"""
submission_updated_view = SubmissionUpdatedView()
sess = GlobalDB.db().session
last_modified = sess.query(submission_updated_view.updated_at).\
filter(submission_updated_view.submission_id == submission_id).first()
return last_modified.updated_at if last_modified else None
def get_timestamp():
""" Gets a timestamp in seconds
Returns:
a string representing seconds since the epoch
"""
return str(int((datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()))
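# Editor's note: get_timestamp() above is effectively str(int(time.time())) spelled out via an
# explicit epoch subtraction; both yield whole seconds since 1970-01-01 UTC.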
def update_external_data_load_date(start_time, end_time, data_type):
""" Update the external_data_load_date table with the start and end times for the given data type
Args:
start_time: a datetime object indicating the start time of the external data load
end_time: a datetime object indicating the end time of the external data load
data_type: a string indicating the data type of the external data load
"""
sess = GlobalDB.db().session
last_stored_obj = sess.query(ExternalDataLoadDate).\
filter_by(external_data_type_id=EXTERNAL_DATA_TYPE_DICT[data_type]).one_or_none()
if not last_stored_obj:
last_stored_obj = ExternalDataLoadDate(
external_data_type_id=EXTERNAL_DATA_TYPE_DICT[data_type],
last_load_date_start=start_time, last_load_date_end=end_time)
sess.add(last_stored_obj)
else:
last_stored_obj.last_load_date_start = start_time
last_stored_obj.last_load_date_end = end_time
sess.commit()
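# Editor's note: a hedged usage sketch (not part of the original module); 'fpds' below is a
# hypothetical data_type key and must exist in EXTERNAL_DATA_TYPE_DICT for the call to succeed.
def _example_record_external_load():  # pragma: no cover
    from datetime import datetime, timedelta
    load_end = datetime.utcnow()
    update_external_data_load_date(load_end - timedelta(hours=1), load_end, 'fpds')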
|
cc0-1.0
|
e68401b7bec390722455397445c19848
| 39.262187
| 119
| 0.637979
| 3.989946
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/0bf2ed508f33_add_state_city_and_county_name_to_.py
|
1
|
1405
|
"""Add state, city, and county name to PublishedAwardFinancialAssistance table
Revision ID: 0bf2ed508f33
Revises: 2c2b9b1ff0e5
Create Date: 2017-07-21 13:05:06.714431
"""
# revision identifiers, used by Alembic.
revision = '0bf2ed508f33'
down_revision = '2c2b9b1ff0e5'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('published_award_financial_assistance', sa.Column('place_of_perform_county_na', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('place_of_perform_state_nam', sa.Text(), nullable=True))
op.add_column('published_award_financial_assistance', sa.Column('place_of_performance_city', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('published_award_financial_assistance', 'place_of_performance_city')
op.drop_column('published_award_financial_assistance', 'place_of_perform_state_nam')
op.drop_column('published_award_financial_assistance', 'place_of_perform_county_na')
### end Alembic commands ###
|
cc0-1.0
|
69aa37f27c5256cc13c0798e34c3a3fb
| 30.931818
| 124
| 0.720996
| 3.178733
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactbroker/test_generic_helper.py
|
1
|
6170
|
import pytest
import datetime as dt
import os
import shutil
from filecmp import dircmp
from zipfile import ZipFile
from sqlalchemy import func, or_
from dataactbroker.helpers.generic_helper import (year_period_to_dates, generate_raw_quoted_query, fy, batch as batcher,
zip_dir)
from dataactcore.models.jobModels import FileGeneration
from dataactcore.utils.responseException import ResponseException
legal_dates = {
dt.datetime(2017, 2, 2, 16, 43, 28, 377373): 2017,
dt.date(2017, 2, 2): 2017,
dt.datetime(2017, 10, 2, 16, 43, 28, 377373): 2018,
dt.date(2017, 10, 2): 2018,
'1000-09-30': 1000,
'1000-10-01': 1001,
'09-30-2000': 2000,
'10-01-2000': 2001,
'10-01-01': 2002
}
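# Editor's note: the expectations above follow the US federal fiscal-year convention that fy()
# presumably implements: dates on or after October 1 belong to the next fiscal year
# (e.g. 2017-10-02 -> FY 2018).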
not_dates = (0, 2017.2, 'forthwith', 'string', '')
def test_year_period_to_dates():
""" Test successful conversions from quarter to dates """
# Test year/period that has dates in the same year
start, end = year_period_to_dates(2017, 4)
assert start == '01/01/2017'
assert end == '01/31/2017'
# Test year/period that has dates in the previous year
start, end = year_period_to_dates(2017, 2)
assert start == '11/01/2016'
assert end == '11/30/2016'
def test_year_period_to_dates_period_failure():
""" Test invalid quarter formats """
error_text = 'Period must be an integer 2-12.'
# Test period that's too high
with pytest.raises(ResponseException) as resp_except:
year_period_to_dates(2017, 13)
assert resp_except.value.status == 400
assert str(resp_except.value) == error_text
# Test period that's too low
with pytest.raises(ResponseException) as resp_except:
year_period_to_dates(2017, 1)
assert resp_except.value.status == 400
assert str(resp_except.value) == error_text
# Test null period
with pytest.raises(ResponseException) as resp_except:
year_period_to_dates(2017, None)
assert resp_except.value.status == 400
assert str(resp_except.value) == error_text
def test_year_period_to_dates_year_failure():
    """ Test invalid year values """
error_text = 'Year must be in YYYY format.'
# Test null year
with pytest.raises(ResponseException) as resp_except:
year_period_to_dates(None, 2)
assert resp_except.value.status == 400
assert str(resp_except.value) == error_text
# Test invalid year
with pytest.raises(ResponseException) as resp_except:
year_period_to_dates(999, 2)
assert resp_except.value.status == 400
assert str(resp_except.value) == error_text
def test_generate_raw_quoted_query(database):
sess = database.session
# Using FileGeneration for example
# Testing various filter logic
q = sess.query(FileGeneration.created_at).filter(
or_(FileGeneration.file_generation_id == 1, FileGeneration.request_date > dt.datetime(2018, 1, 15, 0, 0)),
FileGeneration.agency_code.like('A'),
FileGeneration.file_path.is_(None),
FileGeneration.agency_type.in_(['awarding', 'funding']),
FileGeneration.agency_type.in_([('test',)]),
FileGeneration.is_cached_file.is_(True)
)
expected = "SELECT file_generation.created_at " \
"FROM file_generation " \
"WHERE " \
"(file_generation.file_generation_id = 1 OR file_generation.request_date > '2018-01-15 00:00:00') " \
"AND file_generation.agency_code LIKE 'A' " \
"AND file_generation.file_path IS NULL " \
"AND file_generation.agency_type IN ('awarding', 'funding') " \
"AND file_generation.agency_type IN ('(''test'',)') " \
"AND file_generation.is_cached_file IS true"
assert generate_raw_quoted_query(q) == expected
# Testing funcs
q = sess.query(func.max(FileGeneration.file_generation_id).label("Test Label"))
expected = 'SELECT max(file_generation.file_generation_id) AS "Test Label" ' \
'FROM file_generation'
assert generate_raw_quoted_query(q) == expected
@pytest.mark.parametrize("raw_date, expected_fy", legal_dates.items())
def test_fy_returns_integer(raw_date, expected_fy):
assert isinstance(fy(raw_date), int)
@pytest.mark.parametrize("raw_date, expected_fy", legal_dates.items())
def test_fy_returns_correct(raw_date, expected_fy):
assert fy(raw_date) == expected_fy
@pytest.mark.parametrize("not_date", not_dates)
def test_fy_type_exceptions(not_date):
assert fy(None) is None
with pytest.raises(TypeError):
fy(not_date)
def test_batch():
""" Testing the batch function into chunks of 100 """
full_list = list(range(0, 1000))
initial_batch = list(range(0, 100))
iteration = 0
batch_size = 100
for batch in batcher(full_list, batch_size):
expected_batch = [x + (batch_size * iteration) for x in initial_batch]
assert expected_batch == batch
iteration += 1
assert iteration == 10
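# Editor's note: a minimal sketch (an assumption, not the broker's actual implementation) of a
# generator that would satisfy test_batch above by yielding successive fixed-size slices.
def _example_batch(sequence, size):  # pragma: no cover
    for start in range(0, len(sequence), size):
        yield sequence[start:start + size]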
def test_zip_dir():
""" Testing creating a zip with the zip_dir function """
# make a directory with a couple files
test_dir_path = 'test directory'
os.mkdir(test_dir_path)
test_files = {
'test file a.txt': 'TEST',
'test file b.txt': 'FILES',
'test file c.txt': 'abcd',
}
for test_file_path, test_file_content in test_files.items():
with open(os.path.join(test_dir_path, test_file_path), 'w') as test_file:
test_file.write(test_file_content)
# zip it
test_zip_path = zip_dir(test_dir_path, 'test zip')
# keep the original directory and files to compare
os.rename(test_dir_path, '{} original'.format(test_dir_path))
assert test_zip_path == os.path.abspath('test zip.zip')
# confirm zip inside has the files
ZipFile(test_zip_path).extractall()
assert os.path.exists(test_dir_path)
dir_comp = dircmp('{} original'.format(test_dir_path), test_dir_path)
assert dir_comp.left_only == []
assert dir_comp.right_only == []
assert dir_comp.diff_files == []
# cleanup
os.remove(test_zip_path)
shutil.rmtree(test_dir_path)
shutil.rmtree('{} original'.format(test_dir_path))
|
cc0-1.0
|
676452e24a035918bdaace57005e373c
| 33.088398
| 120
| 0.64765
| 3.456583
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/73db7d2cc754_add_award_procurement.py
|
2
|
14550
|
"""add_award_procurement
Revision ID: 73db7d2cc754
Revises: 31876fecc214
Create Date: 2016-09-01 15:08:33.267152
"""
# revision identifiers, used by Alembic.
revision = '73db7d2cc754'
down_revision = '31876fecc214'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('award_procurement',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('award_procurement_id', sa.Integer(), nullable=False),
sa.Column('submission_id', sa.Integer(), nullable=False),
sa.Column('job_id', sa.Integer(), nullable=False),
sa.Column('row_number', sa.Integer(), nullable=False),
sa.Column('piid', sa.Text(), nullable=True),
sa.Column('awarding_sub_tier_agency_c', sa.Text(), nullable=True),
sa.Column('awarding_sub_tier_agency_n', sa.Text(), nullable=True),
sa.Column('awarding_agency_code', sa.Text(), nullable=True),
sa.Column('awarding_agency_name', sa.Text(), nullable=True),
sa.Column('parent_award_id', sa.Text(), nullable=True),
sa.Column('award_modification_amendme', sa.Text(), nullable=True),
sa.Column('type_of_contract_pricing', sa.Text(), nullable=True),
sa.Column('contract_award_type', sa.Text(), nullable=True),
sa.Column('naics', sa.Text(), nullable=True),
sa.Column('naics_description', sa.Text(), nullable=True),
sa.Column('awardee_or_recipient_uniqu', sa.Text(), nullable=True),
sa.Column('ultimate_parent_legal_enti', sa.Text(), nullable=True),
sa.Column('ultimate_parent_unique_ide', sa.Text(), nullable=True),
sa.Column('award_description', sa.Text(), nullable=True),
sa.Column('place_of_performance_zip4a', sa.Text(), nullable=True),
sa.Column('place_of_performance_congr', sa.Text(), nullable=True),
sa.Column('awardee_or_recipient_legal', sa.Text(), nullable=True),
sa.Column('legal_entity_city_name', sa.Text(), nullable=True),
sa.Column('legal_entity_state_code', sa.Text(), nullable=True),
sa.Column('legal_entity_zip4', sa.Text(), nullable=True),
sa.Column('legal_entity_congressional', sa.Text(), nullable=True),
sa.Column('legal_entity_address_line1', sa.Text(), nullable=True),
sa.Column('legal_entity_address_line2', sa.Text(), nullable=True),
sa.Column('legal_entity_address_line3', sa.Text(), nullable=True),
sa.Column('legal_entity_country_code', sa.Text(), nullable=True),
sa.Column('legal_entity_country_name', sa.Text(), nullable=True),
sa.Column('period_of_performance_star', sa.Text(), nullable=True),
sa.Column('period_of_performance_curr', sa.Text(), nullable=True),
sa.Column('period_of_perf_potential_e', sa.Text(), nullable=True),
sa.Column('ordering_period_end_date', sa.Text(), nullable=True),
sa.Column('action_date', sa.Text(), nullable=True),
sa.Column('action_type', sa.Text(), nullable=True),
sa.Column('federal_action_obligation', sa.Text(), nullable=True),
sa.Column('current_total_value_award', sa.Numeric(), nullable=True),
sa.Column('potential_total_value_awar', sa.Numeric(), nullable=True),
sa.Column('funding_sub_tier_agency_co', sa.Text(), nullable=True),
sa.Column('funding_sub_tier_agency_na', sa.Text(), nullable=True),
sa.Column('funding_office_code', sa.Text(), nullable=True),
sa.Column('funding_office_name', sa.Text(), nullable=True),
sa.Column('awarding_office_code', sa.Text(), nullable=True),
sa.Column('awarding_office_name', sa.Text(), nullable=True),
sa.Column('referenced_idv_agency_iden', sa.Text(), nullable=True),
sa.Column('funding_agency_code', sa.Text(), nullable=True),
sa.Column('funding_agency_name', sa.Text(), nullable=True),
sa.Column('place_of_performance_locat', sa.Text(), nullable=True),
sa.Column('place_of_performance_state', sa.Text(), nullable=True),
sa.Column('place_of_perform_country_c', sa.Text(), nullable=True),
sa.Column('idv_type', sa.Text(), nullable=True),
sa.Column('vendor_doing_as_business_n', sa.Text(), nullable=True),
sa.Column('vendor_phone_number', sa.Text(), nullable=True),
sa.Column('vendor_fax_number', sa.Text(), nullable=True),
sa.Column('multiple_or_single_award_i', sa.Text(), nullable=True),
sa.Column('type_of_idc', sa.Text(), nullable=True),
sa.Column('a_76_fair_act_action', sa.Text(), nullable=True),
sa.Column('dod_claimant_program_code', sa.Text(), nullable=True),
sa.Column('clinger_cohen_act_planning', sa.Text(), nullable=True),
sa.Column('commercial_item_acquisitio', sa.Text(), nullable=True),
sa.Column('commercial_item_test_progr', sa.Text(), nullable=True),
sa.Column('consolidated_contract', sa.Text(), nullable=True),
sa.Column('contingency_humanitarian_o', sa.Text(), nullable=True),
sa.Column('contract_bundling', sa.Text(), nullable=True),
sa.Column('contract_financing', sa.Text(), nullable=True),
sa.Column('contracting_officers_deter', sa.Text(), nullable=True),
sa.Column('cost_accounting_standards', sa.Text(), nullable=True),
sa.Column('cost_or_pricing_data', sa.Text(), nullable=True),
sa.Column('country_of_product_or_serv', sa.Text(), nullable=True),
sa.Column('davis_bacon_act', sa.Text(), nullable=True),
sa.Column('evaluated_preference', sa.Text(), nullable=True),
sa.Column('extent_competed', sa.Text(), nullable=True),
sa.Column('fed_biz_opps', sa.Text(), nullable=True),
sa.Column('foreign_funding', sa.Text(), nullable=True),
sa.Column('government_furnished_equip', sa.Text(), nullable=True),
sa.Column('information_technology_com', sa.Text(), nullable=True),
sa.Column('interagency_contracting_au', sa.Text(), nullable=True),
sa.Column('local_area_set_aside', sa.Text(), nullable=True),
sa.Column('major_program', sa.Text(), nullable=True),
sa.Column('purchase_card_as_payment_m', sa.Text(), nullable=True),
sa.Column('multi_year_contract', sa.Text(), nullable=True),
sa.Column('national_interest_action', sa.Text(), nullable=True),
sa.Column('number_of_actions', sa.Text(), nullable=True),
sa.Column('number_of_offers_received', sa.Text(), nullable=True),
sa.Column('other_statutory_authority', sa.Text(), nullable=True),
sa.Column('performance_based_service', sa.Text(), nullable=True),
sa.Column('place_of_manufacture', sa.Text(), nullable=True),
sa.Column('price_evaluation_adjustmen', sa.Text(), nullable=True),
sa.Column('product_or_service_code', sa.Text(), nullable=True),
sa.Column('program_acronym', sa.Text(), nullable=True),
sa.Column('other_than_full_and_open_c', sa.Text(), nullable=True),
sa.Column('recovered_materials_sustai', sa.Text(), nullable=True),
sa.Column('research', sa.Text(), nullable=True),
sa.Column('sea_transportation', sa.Text(), nullable=True),
sa.Column('service_contract_act', sa.Text(), nullable=True),
sa.Column('small_business_competitive', sa.Text(), nullable=True),
sa.Column('solicitation_identifier', sa.Text(), nullable=True),
sa.Column('solicitation_procedures', sa.Text(), nullable=True),
sa.Column('fair_opportunity_limited_s', sa.Text(), nullable=True),
sa.Column('subcontracting_plan', sa.Text(), nullable=True),
sa.Column('program_system_or_equipmen', sa.Text(), nullable=True),
sa.Column('type_set_aside', sa.Text(), nullable=True),
sa.Column('epa_designated_product', sa.Text(), nullable=True),
sa.Column('walsh_healey_act', sa.Text(), nullable=True),
sa.Column('transaction_number', sa.Text(), nullable=True),
sa.Column('sam_exception', sa.Text(), nullable=True),
sa.Column('city_local_government', sa.Text(), nullable=True),
sa.Column('county_local_government', sa.Text(), nullable=True),
sa.Column('inter_municipal_local_gove', sa.Text(), nullable=True),
sa.Column('local_government_owned', sa.Text(), nullable=True),
sa.Column('municipality_local_governm', sa.Text(), nullable=True),
sa.Column('school_district_local_gove', sa.Text(), nullable=True),
sa.Column('township_local_government', sa.Text(), nullable=True),
sa.Column('us_state_government', sa.Text(), nullable=True),
sa.Column('us_federal_government', sa.Text(), nullable=True),
sa.Column('federal_agency', sa.Text(), nullable=True),
sa.Column('federally_funded_research', sa.Text(), nullable=True),
sa.Column('us_tribal_government', sa.Text(), nullable=True),
sa.Column('foreign_government', sa.Text(), nullable=True),
sa.Column('community_developed_corpor', sa.Text(), nullable=True),
sa.Column('labor_surplus_area_firm', sa.Text(), nullable=True),
sa.Column('corporate_entity_not_tax_e', sa.Text(), nullable=True),
sa.Column('corporate_entity_tax_exemp', sa.Text(), nullable=True),
sa.Column('partnership_or_limited_lia', sa.Text(), nullable=True),
sa.Column('sole_proprietorship', sa.Text(), nullable=True),
sa.Column('small_agricultural_coopera', sa.Text(), nullable=True),
sa.Column('international_organization', sa.Text(), nullable=True),
sa.Column('us_government_entity', sa.Text(), nullable=True),
sa.Column('emerging_small_business', sa.Text(), nullable=True),
sa.Column('c8a_program_participant', sa.Text(), nullable=True),
sa.Column('sba_certified_8_a_joint_ve', sa.Text(), nullable=True),
sa.Column('dot_certified_disadvantage', sa.Text(), nullable=True),
sa.Column('self_certified_small_disad', sa.Text(), nullable=True),
sa.Column('historically_underutilized', sa.Text(), nullable=True),
sa.Column('small_disadvantaged_busine', sa.Text(), nullable=True),
sa.Column('the_ability_one_program', sa.Text(), nullable=True),
sa.Column('historically_black_college', sa.Text(), nullable=True),
sa.Column('c1862_land_grant_college', sa.Text(), nullable=True),
sa.Column('c1890_land_grant_college', sa.Text(), nullable=True),
sa.Column('c1994_land_grant_college', sa.Text(), nullable=True),
sa.Column('minority_institution', sa.Text(), nullable=True),
sa.Column('private_university_or_coll', sa.Text(), nullable=True),
sa.Column('school_of_forestry', sa.Text(), nullable=True),
sa.Column('state_controlled_instituti', sa.Text(), nullable=True),
sa.Column('tribal_college', sa.Text(), nullable=True),
sa.Column('veterinary_college', sa.Text(), nullable=True),
sa.Column('educational_institution', sa.Text(), nullable=True),
sa.Column('alaskan_native_servicing_i', sa.Text(), nullable=True),
sa.Column('community_development_corp', sa.Text(), nullable=True),
sa.Column('native_hawaiian_servicing', sa.Text(), nullable=True),
sa.Column('domestic_shelter', sa.Text(), nullable=True),
sa.Column('manufacturer_of_goods', sa.Text(), nullable=True),
sa.Column('hospital_flag', sa.Text(), nullable=True),
sa.Column('veterinary_hospital', sa.Text(), nullable=True),
sa.Column('hispanic_servicing_institu', sa.Text(), nullable=True),
sa.Column('foundation', sa.Text(), nullable=True),
sa.Column('woman_owned_business', sa.Text(), nullable=True),
sa.Column('minority_owned_business', sa.Text(), nullable=True),
sa.Column('women_owned_small_business', sa.Text(), nullable=True),
sa.Column('economically_disadvantaged', sa.Text(), nullable=True),
sa.Column('joint_venture_women_owned', sa.Text(), nullable=True),
sa.Column('joint_venture_economically', sa.Text(), nullable=True),
sa.Column('veteran_owned_business', sa.Text(), nullable=True),
sa.Column('service_disabled_veteran_o', sa.Text(), nullable=True),
sa.Column('contracts', sa.Text(), nullable=True),
sa.Column('grants', sa.Text(), nullable=True),
sa.Column('receives_contracts_and_gra', sa.Text(), nullable=True),
sa.Column('airport_authority', sa.Text(), nullable=True),
sa.Column('council_of_governments', sa.Text(), nullable=True),
sa.Column('housing_authorities_public', sa.Text(), nullable=True),
sa.Column('interstate_entity', sa.Text(), nullable=True),
sa.Column('planning_commission', sa.Text(), nullable=True),
sa.Column('port_authority', sa.Text(), nullable=True),
sa.Column('transit_authority', sa.Text(), nullable=True),
sa.Column('subchapter_s_corporation', sa.Text(), nullable=True),
sa.Column('limited_liability_corporat', sa.Text(), nullable=True),
sa.Column('foreign_owned_and_located', sa.Text(), nullable=True),
sa.Column('american_indian_owned_busi', sa.Text(), nullable=True),
sa.Column('alaskan_native_owned_corpo', sa.Text(), nullable=True),
sa.Column('indian_tribe_federally_rec', sa.Text(), nullable=True),
sa.Column('native_hawaiian_owned_busi', sa.Text(), nullable=True),
sa.Column('tribally_owned_business', sa.Text(), nullable=True),
sa.Column('asian_pacific_american_own', sa.Text(), nullable=True),
sa.Column('black_american_owned_busin', sa.Text(), nullable=True),
sa.Column('hispanic_american_owned_bu', sa.Text(), nullable=True),
sa.Column('native_american_owned_busi', sa.Text(), nullable=True),
sa.Column('subcontinent_asian_asian_i', sa.Text(), nullable=True),
sa.Column('other_minority_owned_busin', sa.Text(), nullable=True),
sa.Column('for_profit_organization', sa.Text(), nullable=True),
sa.Column('nonprofit_organization', sa.Text(), nullable=True),
sa.Column('other_not_for_profit_organ', sa.Text(), nullable=True),
sa.Column('us_local_government', sa.Text(), nullable=True),
sa.Column('referenced_idv_modificatio', sa.Text(), nullable=True),
sa.Column('undefinitized_action', sa.Text(), nullable=True),
sa.Column('domestic_or_foreign_entity', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('award_procurement_id')
)
op.create_index(op.f('ix_award_procurement_job_id'), 'award_procurement', ['job_id'], unique=False)
op.create_index(op.f('ix_award_procurement_submission_id'), 'award_procurement', ['submission_id'], unique=False)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_award_procurement_submission_id'), table_name='award_procurement')
op.drop_index(op.f('ix_award_procurement_job_id'), table_name='award_procurement')
op.drop_table('award_procurement')
### end Alembic commands ###
|
cc0-1.0
|
c2a33b3a6d028929e5a49086eb36cb90
| 58.387755
| 117
| 0.68433
| 3.169935
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/fd0f86cc5705_fixing_extraneous_columns.py
|
1
|
2083
|
"""fixing extraneous columns
Revision ID: fd0f86cc5705
Revises: 539307ecadea
Create Date: 2017-09-20 13:58:34.720047
"""
# revision identifiers, used by Alembic.
revision = 'fd0f86cc5705'
down_revision = '539307ecadea'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('detached_award_financial_assistance', 'legal_entity_country_name')
op.drop_column('detached_award_financial_assistance', 'funding_office_name')
op.drop_column('detached_award_financial_assistance', 'place_of_perform_county_na')
op.drop_column('detached_award_financial_assistance', 'place_of_perform_country_n')
op.drop_column('detached_award_financial_assistance', 'funding_agency_name')
op.drop_column('detached_award_financial_assistance', 'place_of_perform_county_co')
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('detached_award_financial_assistance', sa.Column('place_of_perform_county_co', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('funding_agency_name', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('place_of_perform_country_n', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('place_of_perform_county_na', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('funding_office_name', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('detached_award_financial_assistance', sa.Column('legal_entity_country_name', sa.TEXT(), autoincrement=False, nullable=True))
### end Alembic commands ###
|
cc0-1.0
|
db7e0a854db347bc222ceaaebc6f73e8
| 40.66
| 144
| 0.731637
| 3.194785
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_b19_object_class_program_activity.py
|
1
|
12345
|
from dataactcore.models.stagingModels import ObjectClassProgramActivity
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'b19_object_class_program_activity'
def test_column_headers(database):
expected_subset = {'row_number', 'beginning_period_of_availa', 'ending_period_of_availabil',
'agency_identifier', 'allocation_transfer_agency', 'availability_type_code',
'main_account_code', 'sub_account_code', 'object_class', 'program_activity_code',
'by_direct_reimbursable_fun', 'disaster_emergency_fund_code', 'uniqueid_TAS',
'uniqueid_ProgramActivityCode', 'uniqueid_ProgramActivityName', 'uniqueid_ObjectClass',
'uniqueid_ByDirectReimbursableFundingSource', 'uniqueid_DisasterEmergencyFundCode'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_success(database):
""" Tests that all combinations of TAS, Object Class, Program Activity, Reimbursable Code, and DEFC in File B
(Object Class Program Activity) are unique
"""
op1 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op2 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='2',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op3 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='2', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op4 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='2',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op5 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='2', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op6 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='2',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op7 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='2', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op8 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='2', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op9 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='2',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op10 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='2', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op11 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='m',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op12 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='d', disaster_emergency_fund_code='n')
# Same values but a different DEFC
op13 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='d', disaster_emergency_fund_code='m')
assert number_of_errors(_FILE, database, models=[op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11,
op12, op13]) == 0
def test_optionals(database):
""" Tests that all combinations of TAS, Object Class, Reimbursable Code, and DEFC in File B (Object Class Program
Activity) are not unique, while omitting an optional field to check that there is still a match
"""
op1 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
availability_type_code='1', main_account_code='1', sub_account_code='1',
object_class='1', program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op2 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
availability_type_code='1', main_account_code='1', sub_account_code='1',
object_class='1', program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
assert number_of_errors(_FILE, database, models=[op1, op2]) == 1
def test_failure(database):
""" Tests that all combinations of TAS, Object Class, Program Activity, Reimbursable Code, and DEFC in File B
(Object Class Program Activity) are not unique
"""
op1 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='n')
op2 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='1',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='N')
# object class with extra trailing zeroes treated the same as without
op3 = ObjectClassProgramActivity(job_id=1, row_number=1, beginning_period_of_availa='1',
ending_period_of_availabil='1', agency_identifier='1',
allocation_transfer_agency='1', availability_type_code='1',
main_account_code='1', sub_account_code='1', object_class='10',
program_activity_code='1', program_activity_name='n',
by_direct_reimbursable_fun='r', disaster_emergency_fund_code='N')
assert number_of_errors(_FILE, database, models=[op1, op2, op3]) == 2
|
cc0-1.0
|
b474d066b4617903d8505ce4857c22d4
| 72.922156
| 117
| 0.530741
| 4.105421
| false
| false
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactvalidator/test_a3_appropriations.py
|
1
|
1790
|
from tests.unit.dataactcore.factories.staging import AppropriationFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'a3_appropriations'
def test_column_headers(database):
expected_subset = {'uniqueid_TAS', 'row_number', 'other_budgetary_resources_cpe', 'contract_authority_amount_cpe',
'borrowing_authority_amount_cpe', 'spending_authority_from_of_cpe', 'difference'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test that TAS values can be found, and null matches work correctly """
approp = AppropriationFactory(other_budgetary_resources_cpe=600, contract_authority_amount_cpe=100,
borrowing_authority_amount_cpe=200, spending_authority_from_of_cpe=300)
approp_null = AppropriationFactory(other_budgetary_resources_cpe=300, contract_authority_amount_cpe=100,
borrowing_authority_amount_cpe=200, spending_authority_from_of_cpe=None)
errors = number_of_errors(_FILE, database, models=[approp, approp_null])
assert errors == 0
def test_failure(database):
""" Test that tas that does not match is an error """
approp = AppropriationFactory(other_budgetary_resources_cpe=800, contract_authority_amount_cpe=100,
borrowing_authority_amount_cpe=200, spending_authority_from_of_cpe=300)
approp_null = AppropriationFactory(other_budgetary_resources_cpe=500, contract_authority_amount_cpe=100,
borrowing_authority_amount_cpe=200, spending_authority_from_of_cpe=None)
errors = number_of_errors(_FILE, database, models=[approp, approp_null])
assert errors == 2
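# Editor's note: a minimal sketch of the arithmetic these tests appear to exercise, derived only
# from the factory values above; the helpers below are illustrative, not the rule's actual SQL.
def _a3_balances(other, contract, borrowing, spending):  # pragma: no cover
    return (other or 0) == (contract or 0) + (borrowing or 0) + (spending or 0)
def _example_a3_balances():  # pragma: no cover
    assert _a3_balances(600, 100, 200, 300)       # the passing factory row
    assert _a3_balances(300, 100, 200, None)      # null spending authority treated as 0
    assert not _a3_balances(800, 100, 200, 300)   # the failing factory row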
|
cc0-1.0
|
c880ba1c954a7a62a590058a99c86577
| 51.647059
| 118
| 0.69162
| 3.587174
| false
| true
| false
| false
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/models/validationModels.py
|
1
|
3738
|
""" These classes define the ORM models to be used by sqlalchemy for the job tracker database """
from sqlalchemy import Column, Integer, Text, ForeignKey, Boolean, Enum
from sqlalchemy.orm import relationship
from dataactcore.models.baseModel import Base
class FieldType(Base):
__tablename__ = "field_type"
field_type_id = Column(Integer, primary_key=True)
name = Column(Text)
description = Column(Text)
TYPE_DICT = None
class FileColumn(Base):
__tablename__ = "file_columns"
file_column_id = Column(Integer, primary_key=True)
file_id = Column(Integer, ForeignKey("file_type.file_type_id", name="fk_file_column_file_type"), nullable=True)
file = relationship("FileType", uselist=False)
field_types_id = Column(Integer, ForeignKey("field_type.field_type_id"), nullable=True)
field_type = relationship("FieldType", uselist=False)
daims_name = Column(Text, nullable=True)
name = Column(Text, nullable=True)
name_short = Column(Text, nullable=True)
description = Column(Text, nullable=True)
required = Column(Boolean, nullable=True)
padded_flag = Column(Boolean, default=False, server_default="False", nullable=False)
length = Column(Integer)
class RuleSeverity(Base):
__tablename__ = "rule_severity"
rule_severity_id = Column(Integer, primary_key=True)
name = Column(Text, nullable=False)
description = Column(Text, nullable=False)
SEVERITY_DICT = None
class RuleSql(Base):
__tablename__ = "rule_sql"
rule_sql_id = Column(Integer, primary_key=True)
rule_sql = Column(Text, nullable=False)
rule_label = Column(Text)
rule_error_message = Column(Text, nullable=False)
rule_cross_file_flag = Column(Boolean, nullable=False)
file_id = Column(Integer, ForeignKey("file_type.file_type_id", name="fk_file"), nullable=True)
file = relationship("FileType", uselist=False, foreign_keys=[file_id])
rule_severity_id = Column(Integer, ForeignKey("rule_severity.rule_severity_id"), nullable=False)
rule_severity = relationship("RuleSeverity", uselist=False)
target_file_id = Column(Integer, ForeignKey("file_type.file_type_id", name="fk_target_file"), nullable=True)
target_file = relationship("FileType", uselist=False, foreign_keys=[target_file_id])
query_name = Column(Text)
expected_value = Column(Text)
category = Column(Text)
class ValidationLabel(Base):
__tablename__ = "validation_label"
validation_label_id = Column(Integer, primary_key=True)
label = Column(Text)
error_message = Column(Text)
file_id = Column(Integer, ForeignKey("file_type.file_type_id", name="fk_file"), nullable=True)
file = relationship("FileType", uselist=False, foreign_keys=[file_id])
column_name = Column(Text)
label_type = Column(Enum('requirement', 'type', name='label_types'))
class RuleSetting(Base):
__tablename__ = "rule_settings"
rule_settings_id = Column(Integer, primary_key=True)
agency_code = Column(Text)
rule_label = Column(Text, nullable=False)
file_id = Column(Integer, ForeignKey("file_type.file_type_id", name="fk_setting_file_type"), nullable=True)
target_file_id = Column(Integer, ForeignKey("file_type.file_type_id", name="fk_setting_target_file_type"),
nullable=True)
priority = Column(Integer, nullable=False)
impact_id = Column(Integer, ForeignKey("rule_impact.rule_impact_id", ondelete="CASCADE", name="fk_impact"),
nullable=False)
class RuleImpact(Base):
__tablename__ = "rule_impact"
rule_impact_id = Column(Integer, primary_key=True)
name = Column(Text, nullable=False)
description = Column(Text, nullable=False)
IMPACT_DICT = None
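# Editor's note: a hedged usage sketch (not part of the original module) showing how one of these
# ORM models would typically be persisted; 'sess' is assumed to be an existing SQLAlchemy session.
def _example_add_rule_severity(sess):  # pragma: no cover
    warning = RuleSeverity(name='warning', description='Warnings do not block a submission')
    sess.add(warning)
    sess.commit()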
|
cc0-1.0
|
2baaaab6e76104201857b2db02590999
| 37.536082
| 115
| 0.692349
| 3.611594
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/serial/devices/vex/stm32_device.py
|
1
|
7686
|
import itertools
import operator
import struct
import time
import typing
from functools import reduce
from typing import *
import pros.common.ui as ui
from pros.common import logger, retries
from pros.serial import bytes_to_str
from pros.serial.devices.vex import VEXCommError
from pros.serial.ports import BasePort
from ..generic_device import GenericDevice
from ..system_device import SystemDevice
class STM32Device(GenericDevice, SystemDevice):
ACK_BYTE = 0x79
NACK_BYTE = 0xFF
NUM_PAGES = 0xff
PAGE_SIZE = 0x2000
def __init__(self, port: BasePort, must_initialize: bool = False, do_negoitate: bool = True):
super().__init__(port)
self.commands = bytes([0x00, 0x01, 0x02, 0x11, 0x21, 0x31, 0x43, 0x63, 0x73, 0x82, 0x92])
if do_negoitate:
# self.port.write(b'\0' * 255)
if must_initialize:
self._txrx_command(0x7f, checksum=False)
try:
self.get(n_retries=0)
except:
logger(__name__).info('Sending bootloader initialization')
time.sleep(0.01)
self.port.rts = 0
for _ in itertools.repeat(None, times=3):
time.sleep(0.01)
self._txrx_command(0x7f, checksum=False)
time.sleep(0.01)
self.get()
def write_program(self, file: typing.BinaryIO, preserve_fs: bool = False, go_after: bool = True, **_):
file_len = file.seek(0, 2)
file.seek(0, 0)
if file_len > (self.NUM_PAGES * self.PAGE_SIZE):
raise VEXCommError(
f'File is too big to be uploaded (max file size: {self.NUM_PAGES * self.PAGE_SIZE} bytes)')
if hasattr(file, 'name'):
display_name = file.name
else:
display_name = '(memory)'
if not preserve_fs:
self.erase_all()
else:
self.erase_memory(list(range(0, int(file_len / self.PAGE_SIZE) + 1)))
address = 0x08000000
with ui.progressbar(length=file_len, label=f'Uploading {display_name}') as progress:
for i in range(0, file_len, 256):
write_size = 256
if i + 256 > file_len:
write_size = file_len - i
self.write_memory(address, file.read(write_size))
address += write_size
progress.update(write_size)
if go_after:
self.go(0x08000000)
def scan_prosfs(self):
pass
@retries
def get(self):
logger(__name__).info('STM32: Get')
self._txrx_command(0x00)
n_bytes = self.port.read(1)[0]
assert n_bytes == 11
data = self.port.read(n_bytes + 1)
logger(__name__).info(f'STM32 Bootloader version 0x{data[0]:x}')
self.commands = data[1:]
logger(__name__).debug(f'STM32 Bootloader commands are: {bytes_to_str(data[1:])}')
assert self.port.read(1)[0] == self.ACK_BYTE
@retries
def get_read_protection_status(self):
logger(__name__).info('STM32: Get ID & Read Protection Status')
self._txrx_command(0x01)
data = self.port.read(3)
logger(__name__).debug(f'STM32 Bootloader Get Version & Read Protection Status is: {bytes_to_str(data)}')
assert self.port.read(1)[0] == self.ACK_BYTE
@retries
def get_id(self):
logger(__name__).info('STM32: Get PID')
self._txrx_command(0x02)
n_bytes = self.port.read(1)[0]
pid = self.port.read(n_bytes + 1)
logger(__name__).debug(f'STM32 Bootloader PID is {pid}')
@retries
def read_memory(self, address: int, n_bytes: int):
        logger(__name__).info(f'STM32: Read {n_bytes} from 0x{address:x}')
assert 255 >= n_bytes > 0
self._txrx_command(0x11)
self._txrx_command(struct.pack('>I', address))
self._txrx_command(n_bytes)
return self.port.read(n_bytes)
@retries
def go(self, start_address: int):
logger(__name__).info(f'STM32: Go 0x{start_address:x}')
self._txrx_command(0x21)
try:
self._txrx_command(struct.pack('>I', start_address), timeout=5.)
except VEXCommError:
logger(__name__).warning('STM32 Bootloader did not acknowledge GO command. '
'The program may take a moment to begin running '
'or the device should be rebooted.')
@retries
def write_memory(self, start_address: int, data: bytes):
logger(__name__).info(f'STM32: Write {len(data)} to 0x{start_address:x}')
assert 0 < len(data) <= 256
if len(data) % 4 != 0:
data = data + (b'\0' * (4 - (len(data) % 4)))
self._txrx_command(0x31)
self._txrx_command(struct.pack('>I', start_address))
self._txrx_command(bytes([len(data) - 1, *data]))
@retries
def erase_all(self):
logger(__name__).info('STM32: Erase all pages')
if not self.commands[6] == 0x43:
raise VEXCommError('Standard erase not supported on this device (only extended erase)')
self._txrx_command(0x43)
self._txrx_command(0xff)
@retries
def erase_memory(self, page_numbers: List[int]):
logger(__name__).info(f'STM32: Erase pages: {page_numbers}')
if not self.commands[6] == 0x43:
raise VEXCommError('Standard erase not supported on this device (only extended erase)')
assert 0 < len(page_numbers) <= 255
assert all([0 <= p <= 255 for p in page_numbers])
self._txrx_command(0x43)
self._txrx_command(bytes([len(page_numbers) - 1, *page_numbers]))
@retries
def extended_erase(self, page_numbers: List[int]):
logger(__name__).info(f'STM32: Extended Erase pages: {page_numbers}')
if not self.commands[6] == 0x44:
raise IOError('Extended erase not supported on this device (only standard erase)')
assert 0 < len(page_numbers) < 0xfff0
assert all([0 <= p <= 0xffff for p in page_numbers])
self._txrx_command(0x44)
self._txrx_command(bytes([len(page_numbers) - 1, *struct.pack(f'>{len(page_numbers)}H', *page_numbers)]))
@retries
def extended_erase_special(self, command: int):
logger(__name__).info(f'STM32: Extended special erase: {command:x}')
if not self.commands[6] == 0x44:
raise IOError('Extended erase not supported on this device (only standard erase)')
assert 0xfffd <= command <= 0xffff
self._txrx_command(0x44)
self._txrx_command(struct.pack('>H', command))
def _txrx_command(self, command: Union[int, bytes], timeout: float = 0.01, checksum: bool = True):
self.port.read_all()
if isinstance(command, bytes):
message = command + (bytes([reduce(operator.xor, command, 0x00)]) if checksum else bytes([]))
elif isinstance(command, int):
message = bytearray([command, ~command & 0xff] if checksum else [command])
else:
raise ValueError(f'Expected command to be bytes or int but got {type(command)}')
logger(__name__).debug(f'STM32 TX: {bytes_to_str(message)}')
self.port.write(message)
self.port.flush()
start_time = time.time()
while time.time() - start_time < timeout:
data = self.port.read(1)
if data and len(data) == 1:
logger(__name__).debug(f'STM32 RX: {data[0]} =?= {self.ACK_BYTE}')
if data[0] == self.ACK_BYTE:
return
raise VEXCommError(f"Device never ACK'd to {command}", command)
|
mpl-2.0
|
563af08c95bbde1be97afb119f03b933
| 39.240838
| 113
| 0.579235
| 3.541935
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/cli/main.py
|
1
|
3332
|
import logging
# Setup analytics first because it is used by other files
import os.path
import pros.common.sentry
import click
import sys
import pros.common.ui as ui
import pros.common.ui.log
from pros.cli.click_classes import *
from pros.cli.common import default_options, root_commands
from pros.common.utils import get_version, logger
from pros.ga.analytics import analytics
import jsonpickle
import pros.cli.build
import pros.cli.conductor
import pros.cli.conductor_utils
import pros.cli.terminal
import pros.cli.upload
import pros.cli.v5_utils
import pros.cli.misc_commands
import pros.cli.interactive
import pros.cli.user_script
root_sources = [
'build',
'conductor',
'conductor_utils',
'terminal',
'upload',
'v5_utils',
'misc_commands', # misc_commands must be after upload so that "pros u" is an alias for upload, not upgrade
'interactive',
'user_script'
]
if getattr(sys, 'frozen', False):
exe_file = sys.executable
else:
exe_file = __file__
if os.path.exists(os.path.join(os.path.dirname(exe_file), os.pardir, os.pardir, '.git')):
root_sources.append('test')
if os.path.exists(os.path.join(os.path.dirname(exe_file), os.pardir, os.pardir, '.git')):
import pros.cli.test
for root_source in root_sources:
__import__(f'pros.cli.{root_source}')
def main():
try:
ctx_obj = {}
click_handler = pros.common.ui.log.PROSLogHandler(ctx_obj=ctx_obj)
ctx_obj['click_handler'] = click_handler
formatter = pros.common.ui.log.PROSLogFormatter('%(levelname)s - %(name)s:%(funcName)s - %(message)s - pros-cli version:{version}'
.format(version = get_version()), ctx_obj)
click_handler.setFormatter(formatter)
logging.basicConfig(level=logging.WARNING, handlers=[click_handler])
cli.main(prog_name='pros', obj=ctx_obj)
except KeyboardInterrupt:
click.echo('Aborted!')
except Exception as e:
logger(__name__).exception(e)
def version(ctx: click.Context, param, value):
if not value:
return
ctx.ensure_object(dict)
if ctx.obj.get('machine_output', False):
ui.echo(get_version())
else:
ui.echo('pros, version {}'.format(get_version()))
ctx.exit(0)
def use_analytics(ctx: click.Context, param, value):
if value is None:
return
touse = not analytics.useAnalytics
if str(value).lower().startswith("t"):
touse = True
elif str(value).lower().startswith("f"):
touse = False
else:
ui.echo('Invalid argument provided for \'--use-analytics\'. Try \'--use-analytics=False\' or \'--use-analytics=True\'')
ctx.exit(0)
ctx.ensure_object(dict)
analytics.set_use(touse)
ui.echo('Analytics set to : {}'.format(analytics.useAnalytics))
ctx.exit(0)
@click.command('pros',
cls=PROSCommandCollection,
sources=root_commands)
@default_options
@click.option('--version', help='Displays version and exits.', is_flag=True, expose_value=False, is_eager=True,
callback=version)
@click.option('--use-analytics', help='Set analytics usage (True/False).', type=str, expose_value=False,
is_eager=True, default=None, callback=use_analytics)
def cli():
pros.common.sentry.register()
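# Editor's addition (hedged sketch, not part of the original module): the --version and
# --use-analytics flags above rely on click's eager-option pattern: is_eager=True runs the
# callback before other parameters are parsed, expose_value=False keeps the flag out of the
# command's own arguments, and the callback exits via ctx.exit(). The names `_demo_answer`
# and `_demo` below are hypothetical and only illustrate that pattern; they are not wired
# into the CLI.
def _demo_answer(ctx: click.Context, param, value):
    if not value:
        return
    click.echo('42')
    ctx.exit(0)


@click.command()
@click.option('--answer', is_flag=True, expose_value=False, is_eager=True,
              callback=_demo_answer)
def _demo():
    click.echo('normal execution')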
if __name__ == '__main__':
main()
|
mpl-2.0
|
1f7cd4a1d68d96842c5a5a3f17ad2558
| 28.22807
| 138
| 0.663866
| 3.375887
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/conductor/interactive/UpdateProjectModal.py
|
1
|
6239
|
import os.path
from typing import *
from click import Context, get_current_context
from semantic_version import Version
from pros.common import ui
from pros.common.ui.interactive import application, components, parameters
from pros.conductor import BaseTemplate, Conductor, Project
from pros.conductor.project.ProjectTransaction import ProjectTransaction
from .components import TemplateListingComponent
from .parameters import ExistingProjectParameter, TemplateParameter
class UpdateProjectModal(application.Modal[None]):
@property
def is_processing(self):
return self._is_processing
@is_processing.setter
def is_processing(self, value: bool):
self._is_processing = bool(value)
self.redraw()
def _generate_transaction(self) -> ProjectTransaction:
transaction = ProjectTransaction(self.project, self.conductor)
apply_kwargs = dict(
force_apply=self.force_apply_parameter.value
)
if self.name.value != self.project.name:
transaction.change_name(self.name.value)
if self.project.template_is_applicable(self.current_kernel.value, **apply_kwargs):
transaction.apply_template(self.current_kernel.value, **apply_kwargs)
for template in self.current_templates:
if template.removed:
transaction.rm_template(BaseTemplate.create_query(template.value.name))
elif self.project.template_is_applicable(template.value, **apply_kwargs):
transaction.apply_template(template.value, **apply_kwargs)
for template in self.new_templates:
if not template.removed: # template should never be "removed"
transaction.apply_template(template.value, force_apply=self.force_apply_parameter.value)
return transaction
def _add_template(self):
options = self.conductor.resolve_templates(identifier=BaseTemplate(target=self.project.target), unique=True)
ui.logger(__name__).debug(options)
p = TemplateParameter(None, options)
@p.on('removed')
def remove_template():
self.new_templates.remove(p)
self.new_templates.append(p)
def __init__(self, ctx: Optional[Context] = None, conductor: Optional[Conductor] = None,
project: Optional[Project] = None):
super().__init__('Update a project')
self.conductor = conductor or Conductor()
self.click_ctx = ctx or get_current_context()
self._is_processing = False
self.project: Optional[Project] = project
self.project_path = ExistingProjectParameter(
str(project.location) if project else os.path.join(os.path.expanduser('~'), 'My PROS Project')
)
self.name = parameters.Parameter(None)
self.current_kernel: TemplateParameter = None
self.current_templates: List[TemplateParameter] = []
self.new_templates: List[TemplateParameter] = []
self.force_apply_parameter = parameters.BooleanParameter(False)
self.templates_collapsed = parameters.BooleanParameter(False)
self.advanced_collapsed = parameters.BooleanParameter(True)
self.add_template_button = components.Button('Add Template')
self.add_template_button.on_clicked(self._add_template)
cb = self.project_path.on_changed(self.project_changed, asynchronous=True)
if self.project_path.is_valid():
cb(self.project_path)
def project_changed(self, new_project: ExistingProjectParameter):
try:
self.is_processing = True
self.project = Project(new_project.value)
self.name.update(self.project.project_name)
self.current_kernel = TemplateParameter(
None,
options=sorted(
{t for t in self.conductor.resolve_templates(self.project.templates['kernel'].as_query())},
key=lambda v: Version(v.version), reverse=True
)
)
self.current_templates = [
TemplateParameter(
None,
options=sorted({
t
for t in self.conductor.resolve_templates(t.as_query())
}, key=lambda v: Version(v.version), reverse=True)
)
for t in self.project.templates.values()
if t.name != 'kernel'
]
self.new_templates = []
self.is_processing = False
except BaseException as e:
ui.logger(__name__).exception(e)
def confirm(self, *args, **kwargs):
self.exit()
self._generate_transaction().execute()
@property
def can_confirm(self):
return self.project and self._generate_transaction().can_execute()
def build(self) -> Generator[components.Component, None, None]:
yield components.DirectorySelector('Project Directory', self.project_path)
if self.is_processing:
yield components.Spinner()
elif self.project_path.is_valid():
assert self.project is not None
yield components.Label(f'Modify your {self.project.target} project.')
yield components.InputBox('Project Name', self.name)
yield TemplateListingComponent(self.current_kernel, editable=dict(version=True), removable=False)
yield components.Container(
*(TemplateListingComponent(t, editable=dict(version=True), removable=True) for t in
self.current_templates),
*(TemplateListingComponent(t, editable=True, removable=True) for t in self.new_templates),
self.add_template_button,
title='Templates',
collapsed=self.templates_collapsed
)
yield components.Container(
components.Checkbox('Re-apply all templates', self.force_apply_parameter),
title='Advanced',
collapsed=self.advanced_collapsed
)
yield components.Label('What will happen when you click "Continue":')
yield components.VerbatimLabel(self._generate_transaction().describe())
|
mpl-2.0
|
80e25e86760ea71d98cf14df156880c9
| 41.442177
| 116
| 0.633916
| 4.456429
| false
| false
| false
| false
|
purduesigbots/pros-cli
|
pros/serial/devices/vex/v5_device.py
|
1
|
50297
|
import gzip
import io
import re
import struct
import time
import typing
from collections import defaultdict
from configparser import ConfigParser
from datetime import datetime, timedelta
from enum import IntEnum, IntFlag
from io import BytesIO, StringIO
from pathlib import Path
from typing import *
from typing import BinaryIO
from semantic_version import Spec
from pros.common import ui
from pros.common import *
from pros.common.utils import *
from pros.conductor import Project
from pros.serial import bytes_to_str, decode_bytes_to_str
from pros.serial.ports import BasePort, list_all_comports
from .comm_error import VEXCommError
from .crc import CRC
from .message import Message
from .vex_device import VEXDevice
from ..system_device import SystemDevice
int_str = Union[int, str]
def find_v5_ports(p_type: str):
def filter_vex_ports(p):
return p.vid is not None and p.vid in [0x2888, 0x0501] or \
p.name is not None and ('VEX' in p.name or 'V5' in p.name)
def filter_v5_ports(p, locations, names):
return (p.location is not None and any([p.location.endswith(l) for l in locations])) or \
(p.name is not None and any([n in p.name for n in names])) or \
(p.description is not None and any([n in p.description for n in names]))
ports = [p for p in list_all_comports() if filter_vex_ports(p)]
# Initially try filtering based off of location or the name of the device.
# Doesn't work on macOS or Jonathan's Dell, so we have a fallback (below)
user_ports = [p for p in ports if filter_v5_ports(p, ['2'], ['User'])]
system_ports = [p for p in ports if filter_v5_ports(p, ['0'], ['System', 'Communications'])]
joystick_ports = [p for p in ports if filter_v5_ports(p, ['1'], ['Controller'])]
# Testing this code path is hard!
if len(user_ports) != len(system_ports):
if len(user_ports) > len(system_ports):
user_ports = [p for p in user_ports if p not in system_ports]
else:
system_ports = [p for p in system_ports if p not in user_ports]
if len(user_ports) == len(system_ports) and len(user_ports) > 0:
if p_type.lower() == 'user':
return user_ports
elif p_type.lower() == 'system':
return system_ports + joystick_ports
else:
raise ValueError(f'Invalid port type specified: {p_type}')
# None of the typical filters worked, so if there are only two ports, then the lower one is always*
# the USER? port (*always = I haven't found a guarantee)
if len(ports) == 2:
# natural sort based on: https://stackoverflow.com/a/16090640
def natural_key(chunk: str):
return [int(text) if text.isdigit() else text.lower() for text in re.split(r'(\d+)', chunk)]
ports = sorted(ports, key=lambda p: natural_key(p.device))
if p_type.lower() == 'user':
return [ports[1]]
elif p_type.lower() == 'system':
return [ports[0], *joystick_ports]
else:
raise ValueError(f'Invalid port type specified: {p_type}')
# these can now also be used as user ports
if len(joystick_ports) > 0: # and p_type.lower() == 'system':
return joystick_ports
return []
def with_download_channel(f):
"""
Function decorator for use inside V5Device class. Needs to be outside the class because @staticmethod prevents
us from making a function decorator
"""
def wrapped(device, *args, **kwargs):
with V5Device.DownloadChannel(device):
return f(device, *args, **kwargs)
return wrapped
def compress_file(file: BinaryIO, file_len: int, label='Compressing binary') -> Tuple[BinaryIO, int]:
buf = io.BytesIO()
with ui.progressbar(length=file_len, label=label) as progress:
with gzip.GzipFile(fileobj=buf, mode='wb', mtime=0) as f:
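            # Editor's note (added comment): mtime=0 presumably keeps the gzip output
            # deterministic across runs, so the CRC32 comparison against a copy already
            # on the V5 (see upload_library) can still match.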
while True:
data = file.read(16 * 1024)
if not data:
break
f.write(data)
progress.update(len(data))
# recompute file length
file_len = buf.seek(0, 2)
buf.seek(0, 0)
return buf, file_len
class V5Device(VEXDevice, SystemDevice):
vid_map = {'user': 1, 'system': 15, 'rms': 16, 'pros': 24, 'mw': 32} # type: Dict[str, int]
channel_map = {'pit': 0, 'download': 1} # type: Dict[str, int]
class FTCompleteOptions(IntEnum):
DONT_RUN = 0
RUN_IMMEDIATELY = 0b01
RUN_SCREEN = 0b11
VEX_CRC16 = CRC(16, 0x1021) # CRC-16-CCIT
VEX_CRC32 = CRC(32, 0x04C11DB7) # CRC-32 (the one used everywhere but has no name)
class SystemVersion(object):
class Product(IntEnum):
CONTROLLER = 0x11
BRAIN = 0x10
class BrainFlags(IntFlag):
pass
class ControllerFlags(IntFlag):
CONNECTED = 0x02
flag_map = {Product.BRAIN: BrainFlags, Product.CONTROLLER: ControllerFlags}
def __init__(self, data: tuple):
from semantic_version import Version
self.system_version = Version('{}.{}.{}-{}.{}'.format(*data[0:5]))
self.product = V5Device.SystemVersion.Product(data[5])
self.product_flags = self.flag_map[self.product](data[6])
def __str__(self):
return f'System Version: {self.system_version}\n' \
f' Product: {self.product.name}\n' \
f' Product Flags: {self.product_flags.value:x}'
class SystemStatus(object):
def __init__(self, data: tuple):
from semantic_version import Version
self.system_version = Version('{}.{}.{}-{}'.format(*data[0:4]))
self.cpu0_version = Version('{}.{}.{}-{}'.format(*data[4:8]))
self.cpu1_version = Version('{}.{}.{}-{}'.format(*data[8:12]))
self.touch_version = data[12]
self.system_id = data[13]
def __getitem__(self, item):
return self.__dict__[item]
def __init__(self, port: BasePort):
self._status = None
self._serial_cache = b''
super().__init__(port)
class DownloadChannel(object):
def __init__(self, device: 'V5Device', timeout: float = 5.):
self.device = device
self.timeout = timeout
self.did_switch = False
def __enter__(self):
version = self.device.query_system_version()
if version.product == V5Device.SystemVersion.Product.CONTROLLER:
self.device.default_timeout = 2.
if V5Device.SystemVersion.ControllerFlags.CONNECTED not in version.product_flags:
raise VEXCommError('V5 Controller doesn\'t appear to be connected to a V5 Brain', version)
ui.echo('Transferring V5 to download channel')
self.device.ft_transfer_channel('download')
self.did_switch = True
logger(__name__).debug('Sleeping for a while to let V5 start channel transfer')
time.sleep(.25) # wait at least 250ms before starting to poll controller if it's connected yet
version = self.device.query_system_version()
start_time = time.time()
# ask controller every 250 ms if it's connected until it is
while V5Device.SystemVersion.ControllerFlags.CONNECTED not in version.product_flags and \
time.time() - start_time < self.timeout:
version = self.device.query_system_version()
time.sleep(0.25)
if V5Device.SystemVersion.ControllerFlags.CONNECTED not in version.product_flags:
raise VEXCommError('Could not transfer V5 Controller to download channel', version)
logger(__name__).info('V5 should have been transferred to the higher bandwidth download channel')
return self
else:
return self
def __exit__(self, *exc):
if self.did_switch:
self.device.ft_transfer_channel('pit')
ui.echo('V5 has been transferred back to pit channel')
@property
def status(self):
if not self._status:
self._status = self.get_system_status()
return self._status
@property
def can_compress(self):
return self.status['system_version'] in Spec('>=1.0.5')
@property
def is_wireless(self):
version = self.query_system_version()
return version.product == V5Device.SystemVersion.Product.CONTROLLER and \
V5Device.SystemVersion.ControllerFlags.CONNECTED in version.product_flags
def generate_cold_hash(self, project: Project, extra: dict):
keys = {k: t.version for k, t in project.templates.items()}
keys.update(extra)
from hashlib import md5
from base64 import b64encode
msg = str(sorted(keys, key=lambda t: t[0])).encode('ascii')
name = b64encode(md5(msg).digest()).rstrip(b'=').decode('ascii')
if Spec('<=1.0.0-27').match(self.status['cpu0_version']):
# Bug prevents linked files from being > 18 characters long.
# 17 characters is probably good enough for hash, so no need to fail out
name = name[:17]
return name
def upload_project(self, project: Project, **kwargs):
assert project.target == 'v5'
monolith_path = project.location.joinpath(project.output)
if monolith_path.exists():
logger(__name__).debug(f'Monolith exists! ({monolith_path})')
if 'hot_output' in project.templates['kernel'].metadata and \
'cold_output' in project.templates['kernel'].metadata:
hot_path = project.location.joinpath(project.templates['kernel'].metadata['hot_output'])
cold_path = project.location.joinpath(project.templates['kernel'].metadata['cold_output'])
upload_hot_cold = False
if hot_path.exists() and cold_path.exists():
logger(__name__).debug(f'Hot and cold files exist! ({hot_path}; {cold_path})')
if monolith_path.exists():
monolith_mtime = monolith_path.stat().st_mtime
hot_mtime = hot_path.stat().st_mtime
logger(__name__).debug(f'Monolith last modified: {monolith_mtime}')
logger(__name__).debug(f'Hot last modified: {hot_mtime}')
if hot_mtime > monolith_mtime:
upload_hot_cold = True
logger(__name__).debug('Hot file is newer than monolith!')
else:
upload_hot_cold = True
if upload_hot_cold:
with hot_path.open(mode='rb') as hot:
with cold_path.open(mode='rb') as cold:
kwargs['linked_file'] = cold
kwargs['linked_remote_name'] = self.generate_cold_hash(project, {})
kwargs['linked_file_addr'] = int(
project.templates['kernel'].metadata.get('cold_addr', 0x03800000))
kwargs['addr'] = int(project.templates['kernel'].metadata.get('hot_addr', 0x07800000))
return self.write_program(hot, **kwargs)
if not monolith_path.exists():
raise ui.dont_send(Exception('No output files were found! Have you built your project?'))
with monolith_path.open(mode='rb') as pf:
return self.write_program(pf, **kwargs)
def generate_ini_file(self, remote_name: str = None, slot: int = 0, ini: ConfigParser = None, **kwargs):
project_ini = ConfigParser()
from semantic_version import Spec
default_icon = 'USER902x.bmp' if Spec('>=1.0.0-22').match(self.status['cpu0_version']) else 'USER999x.bmp'
project_ini['project'] = {
'version': str(kwargs.get('ide_version') or get_version()),
'ide': str(kwargs.get('ide') or 'PROS')
}
project_ini['program'] = {
'version': kwargs.get('version', '0.0.0') or '0.0.0',
'name': remote_name,
'slot': slot,
'icon': kwargs.get('icon', default_icon) or default_icon,
'description': kwargs.get('description', 'Created with PROS'),
'date': datetime.now().isoformat()
}
if ini:
project_ini.update(ini)
with StringIO() as ini_str:
project_ini.write(ini_str)
logger(__name__).info(f'Created ini: {ini_str.getvalue()}')
return ini_str.getvalue()
@with_download_channel
def write_program(self, file: typing.BinaryIO, remote_name: str = None, ini: ConfigParser = None, slot: int = 0,
file_len: int = -1, run_after: FTCompleteOptions = FTCompleteOptions.DONT_RUN,
target: str = 'flash', quirk: int = 0, linked_file: Optional[typing.BinaryIO] = None,
linked_remote_name: Optional[str] = None, linked_file_addr: Optional[int] = None,
compress_bin: bool = True, **kwargs):
with ui.Notification():
action_string = f'Uploading program "{remote_name}"'
finish_string = f'Finished uploading "{remote_name}"'
if hasattr(file, 'name'):
action_string += f' ({remote_name if remote_name else Path(file.name).name})'
finish_string += f' ({remote_name if remote_name else Path(file.name).name})'
action_string += f' to V5 slot {slot + 1} on {self.port}'
if compress_bin:
action_string += ' (compressed)'
ui.echo(action_string)
remote_base = f'slot_{slot + 1}'
if target == 'ddr':
self.write_file(file, f'{remote_base}.bin', file_len=file_len, type='bin',
target='ddr', run_after=run_after, linked_filename=linked_remote_name, **kwargs)
return
if not isinstance(ini, ConfigParser):
ini = ConfigParser()
if not remote_name:
remote_name = file.name
if len(remote_name) > 23:
logger(__name__).info('Truncating remote name to {} for length.'.format(remote_name[:23]))
remote_name = remote_name[:23]
ini_file = self.generate_ini_file(remote_name=remote_name, slot=slot, ini=ini, **kwargs)
logger(__name__).info(f'Created ini: {ini_file}')
if linked_file is not None:
self.upload_library(linked_file, remote_name=linked_remote_name, addr=linked_file_addr,
compress=compress_bin, force_upload=kwargs.pop('force_upload_linked', False))
bin_kwargs = {k: v for k, v in kwargs.items() if v in ['addr']}
if (quirk & 0xff) == 1:
# WRITE BIN FILE
self.write_file(file, f'{remote_base}.bin', file_len=file_len, type='bin', run_after=run_after,
linked_filename=linked_remote_name, compress=compress_bin, **bin_kwargs, **kwargs)
with BytesIO(ini_file.encode(encoding='ascii')) as ini_bin:
# WRITE INI FILE
self.write_file(ini_bin, f'{remote_base}.ini', type='ini', **kwargs)
elif (quirk & 0xff) == 0:
# STOP PROGRAM
self.execute_program_file('', run=False)
with BytesIO(ini_file.encode(encoding='ascii')) as ini_bin:
# WRITE INI FILE
self.write_file(ini_bin, f'{remote_base}.ini', type='ini', **kwargs)
# WRITE BIN FILE
self.write_file(file, f'{remote_base}.bin', file_len=file_len, type='bin', run_after=run_after,
linked_filename=linked_remote_name, compress=compress_bin, **bin_kwargs, **kwargs)
else:
raise ValueError(f'Unknown quirk option: {quirk}')
ui.finalize('upload', f'{finish_string} to V5')
def ensure_library_space(self, name: Optional[str] = None, vid: int_str = None,
target_name: Optional[str] = None):
"""
Determine which files should be removed to make room on the V5 filesystem.
This method searches for any orphaned files:
- libraries without any user files linking to them
- user files whose linked library does not exist
and removes them without prompting.
It also ensures that no more than 3 libraries are in use on the V5.
If there are more than 3 libraries, the oldest libraries are elected for eviction after a prompt;
"oldest" is judged by the newest timestamp among a library and the user programs linking to it.
"""
assert not (vid is None and name is not None)
used_libraries = []
if vid is not None:
if isinstance(vid, str):
vid = self.vid_map[vid.lower()]
# assume all libraries
unused_libraries = [
(vid, l['filename'])
for l
in [self.get_file_metadata_by_idx(i)
for i in range(0, self.get_dir_count(vid=vid))
]
]
if name is not None:
if (vid, name) in unused_libraries:
# we'll be overwriting the library anyway, so remove it as a candidate for removal
unused_libraries.remove((vid, name))
used_libraries.append((vid, name))
else:
unused_libraries = []
programs: Dict[str, Dict] = {
# need the linked file metadata, so we have to use the get_file_metadata_by_name command
p['filename']: self.get_file_metadata_by_name(p['filename'], vid='user')
for p
in [self.get_file_metadata_by_idx(i)
for i in range(0, self.get_dir_count(vid='user'))]
if p['type'] == 'bin'
}
library_usage: Dict[Tuple[int, str], List[str]] = defaultdict(list)
for program_name, metadata in programs.items():
library_usage[(metadata['linked_vid'], metadata['linked_filename'])].append(program_name)
orphaned_files: List[Union[str, Tuple[int, str]]] = []
for link, program_names in library_usage.items():
linked_vid, linked_name = link
if name is not None and linked_vid == vid and linked_name == name:
logger(__name__).debug(f'{program_names} will be removed because the library will be replaced')
orphaned_files.extend(program_names)
elif linked_vid != 0: # linked_vid == 0 means there's no link. Can't be orphaned if there's no link
if link in unused_libraries:
# the library is being used
logger(__name__).debug(f'{link} is being used')
unused_libraries.remove(link)
used_libraries.append(link)
else:
try:
self.get_file_metadata_by_name(linked_name, vid=linked_vid)
logger(__name__).debug(f'{link} exists')
used_libraries.append(link)
except VEXCommError as e:
logger(__name__).debug(dont_send(e))
logger(__name__).debug(f'{program_names} will be removed because {link} does not exist')
orphaned_files.extend(program_names)
orphaned_files.extend(unused_libraries)
if target_name is not None and target_name in orphaned_files:
# the file will be overwritten anyway
orphaned_files.remove(target_name)
if len(orphaned_files) > 0:
logger(__name__).warning(f'Removing {len(orphaned_files)} orphaned file(s) ({orphaned_files})')
for file in orphaned_files:
if isinstance(file, tuple):
self.erase_file(file_name=file[1], vid=file[0])
else:
self.erase_file(file_name=file, erase_all=True, vid='user')
if len(used_libraries) > 3:
libraries = [
(linked_vid, linked_name, self.get_file_metadata_by_name(linked_name, vid=linked_vid)['timestamp'])
for linked_vid, linked_name
in used_libraries
]
library_usage_timestamps = sorted([
(
linked_vid,
linked_name,
# get the most recent timestamp of the library and all files linking to it
max(linked_timestamp, *[programs[p]['timestamp'] for p in library_usage[(linked_vid, linked_name)]])
)
for linked_vid, linked_name, linked_timestamp
in libraries
], key=lambda t: t[2])
evicted_files: List[Union[str, Tuple[int, str]]] = []
evicted_file_list = ''
for evicted_library in library_usage_timestamps[:3]:
evicted_files.append(evicted_library[0:2])
evicted_files.extend(library_usage[evicted_library[0:2]])
evicted_file_list += evicted_library[1] + ', '
evicted_file_list += ', '.join(library_usage[evicted_library[0:2]]) + ', '
evicted_file_list = evicted_file_list[:-2] # remove the trailing ", "
assert len(evicted_files) > 0
if confirm(f'There are too many files on the V5. PROS can remove the following suggested old files: '
f'{evicted_file_list}',
title='Confirm file eviction plan:'):
for file in evicted_files:
if isinstance(file, tuple):
self.erase_file(file_name=file[1], vid=file[0])
else:
self.erase_file(file_name=file, erase_all=True, vid='user')
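    # Editor's note (added comments, not in the original): ensure_library_space works in three
    # passes: (1) collect the libraries under the given vid and drop from consideration any that
    # will be overwritten; (2) delete orphans outright, i.e. libraries no user program links to
    # and user programs whose linked library is missing or about to be replaced; (3) if more than
    # three libraries remain in use, rank each by the newest timestamp among the library and the
    # programs linking to it and, after a confirmation prompt, erase the oldest ones together
    # with their dependent programs.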
def upload_library(self, file: typing.BinaryIO, remote_name: str = None, file_len: int = -1, vid: int_str = 'pros',
force_upload: bool = False, compress: bool = True, **kwargs):
"""
Upload a file used for linking. Contains the logic to check if the file is already present in the filesystem
and to prompt the user if we need to evict a library (and user programs).
If force_upload is true, then the "is already present in the filesystem" check is skipped
"""
if not remote_name:
remote_name = file.name
if len(remote_name) > 23:
logger(__name__).info('Truncating remote name to {} for length.'.format(remote_name[:23]))
remote_name = remote_name[:23]
if file_len < 0:
file_len = file.seek(0, 2)
file.seek(0, 0)
if compress and self.can_compress:
file, file_len = compress_file(file, file_len, label='Compressing library')
crc32 = self.VEX_CRC32.compute(file.read(file_len))
file.seek(0, 0)
if not force_upload:
try:
response = self.get_file_metadata_by_name(remote_name, vid)
logger(__name__).debug(response)
logger(__name__).debug({'file len': file_len, 'crc': crc32})
if response['size'] == file_len and response['crc'] == crc32:
ui.echo('Library is already onboard V5')
return
else:
logger(__name__).warning(f'Library onboard doesn\'t match! '
f'Length was {response["size"]} but expected {file_len} '
f'CRC: was {response["crc"]:x} but expected {crc32:x}')
except VEXCommError as e:
logger(__name__).debug(e)
else:
logger(__name__).info('Skipping already-uploaded checks')
logger(__name__).debug('Going to worry about uploading the file now')
self.ensure_library_space(remote_name, vid)
self.write_file(file, remote_name, file_len, vid=vid, **kwargs)
def read_file(self, file: typing.IO[bytes], remote_file: str, vid: int_str = 'user', target: int_str = 'flash',
addr: Optional[int] = None, file_len: Optional[int] = None):
if isinstance(vid, str):
vid = self.vid_map[vid.lower()]
if addr is None:
metadata = self.get_file_metadata_by_name(remote_file, vid=vid)
addr = metadata['addr']
wireless = self.is_wireless
ft_meta = self.ft_initialize(remote_file, function='download', vid=vid, target=target, addr=addr)
if file_len is None:
file_len = ft_meta['file_size']
if wireless and file_len > 0x25000:
confirm(f'You\'re about to download {file_len} bytes wirelessly. This could take some time, and you should '
f'consider downloading directly with a wire.', abort=True, default=False)
max_packet_size = ft_meta['max_packet_size']
with ui.progressbar(length=file_len, label='Downloading {}'.format(remote_file)) as progress:
for i in range(0, file_len, max_packet_size):
packet_size = max_packet_size
if i + max_packet_size > file_len:
packet_size = file_len - i
file.write(self.ft_read(addr + i, packet_size))
progress.update(packet_size)
logger(__name__).debug('Completed {} of {} bytes'.format(i + packet_size, file_len))
self.ft_complete()
def write_file(self, file: typing.BinaryIO, remote_file: str, file_len: int = -1,
run_after: FTCompleteOptions = FTCompleteOptions.DONT_RUN, linked_filename: Optional[str] = None,
linked_vid: int_str = 'pros', compress: bool = False, **kwargs):
if file_len < 0:
file_len = file.seek(0, 2)
file.seek(0, 0)
display_name = remote_file
if hasattr(file, 'name'):
display_name = f'{remote_file} ({Path(file.name).name})'
if compress and self.can_compress:
file, file_len = compress_file(file, file_len)
if self.is_wireless and file_len > 0x25000:
confirm(f'You\'re about to upload {file_len} bytes wirelessly. This could take some time, and you should '
f'consider uploading directly with a wire.', abort=True, default=False)
crc32 = self.VEX_CRC32.compute(file.read(file_len))
file.seek(0, 0)
addr = kwargs.get('addr', 0x03800000)
logger(__name__).info('Transferring {} ({} bytes) to the V5 from {}'.format(remote_file, file_len, file))
ft_meta = self.ft_initialize(remote_file, function='upload', length=file_len, crc=crc32, **kwargs)
if linked_filename is not None:
logger(__name__).debug('Setting file link')
self.ft_set_link(linked_filename, vid=linked_vid)
assert ft_meta['file_size'] >= file_len
if len(remote_file) > 24:
logger(__name__).info('Truncating {} to {} due to length'.format(remote_file, remote_file[:24]))
remote_file = remote_file[:24]
max_packet_size = int(ft_meta['max_packet_size'] / 2)
with ui.progressbar(length=file_len, label='Uploading {}'.format(display_name)) as progress:
for i in range(0, file_len, max_packet_size):
packet_size = max_packet_size
if i + max_packet_size > file_len:
packet_size = file_len - i
logger(__name__).debug('Writing {} bytes at 0x{:02X}'.format(packet_size, addr + i))
self.ft_write(addr + i, file.read(packet_size))
progress.update(packet_size)
logger(__name__).debug('Completed {} of {} bytes'.format(i + packet_size, file_len))
logger(__name__).debug('Data transfer complete, sending ft complete')
if compress and self.status['system_version'] in Spec('>=1.0.5'):
logger(__name__).info('Closing gzip file')
file.close()
self.ft_complete(options=run_after)
@with_download_channel
def capture_screen(self) -> Tuple[List[List[int]], int, int]:
self.sc_init()
width, height = 512, 272
file_size = width * height * 4 # ARGB
rx_io = BytesIO()
self.read_file(rx_io, '', vid='system', target='screen', addr=0, file_len=file_size)
rx = rx_io.getvalue()
rx = struct.unpack('<{}I'.format(len(rx) // 4), rx)
data = [[] for _ in range(height)]
for y in range(height):
for x in range(width - 1):
if x < 480:
px = rx[y * width + x]
data[y].append((px & 0xff0000) >> 16)
data[y].append((px & 0x00ff00) >> 8)
data[y].append(px & 0x0000ff)
return data, 480, height
def used_slots(self) -> Dict[int, Optional[str]]:
with ui.Notification():
rv = {}
for slot in range(1, 9):
ini = self.read_ini(f'slot_{slot}.ini')
rv[slot] = ini['program']['name'] if ini is not None else None
return rv
def read_ini(self, remote_name: str) -> Optional[ConfigParser]:
try:
rx_io = BytesIO()
self.read_file(rx_io, remote_name)
config = ConfigParser()
rx_io.seek(0, 0)
config.read_string(rx_io.read().decode('ascii'))
return config
except VEXCommError as e:
return None
@retries
def query_system_version(self) -> SystemVersion:
logger(__name__).debug('Sending simple 0xA408 command')
ret = self._txrx_simple_struct(0xA4, '>8B')
logger(__name__).debug('Completed simple 0xA408 command')
return V5Device.SystemVersion(ret)
@retries
def ft_transfer_channel(self, channel: int_str):
logger(__name__).debug(f'Transferring to {channel} channel')
logger(__name__).debug('Sending ext 0x10 command')
if isinstance(channel, str):
channel = self.channel_map[channel]
assert isinstance(channel, int) and 0 <= channel <= 1
self._txrx_ext_packet(0x10, struct.pack('<2B', 1, channel), rx_length=0)
logger(__name__).debug('Completed ext 0x10 command')
@retries
def ft_initialize(self, file_name: str, **kwargs) -> Dict[str, Any]:
logger(__name__).debug('Sending ext 0x11 command')
options = {
'function': 'upload',
'target': 'flash',
'vid': 'user',
'overwrite': True,
'options': 0,
'length': 0,
'addr': 0x03800000,
'crc': 0,
'type': 'bin',
'timestamp': datetime.now(),
'version': 0x01_00_00_00,
'name': file_name
}
options.update({k: v for k, v in kwargs.items() if k in options and v is not None})
if isinstance(options['function'], str):
options['function'] = {'upload': 1, 'download': 2}[options['function'].lower()]
if isinstance(options['target'], str):
options['target'] = {'ddr': 0, 'flash': 1, 'screen': 2}[options['target'].lower()]
if isinstance(options['vid'], str):
options['vid'] = self.vid_map[options['vid'].lower()]
if isinstance(options['type'], str):
options['type'] = options['type'].encode(encoding='ascii')
if isinstance(options['name'], str):
options['name'] = options['name'].encode(encoding='ascii')
options['options'] |= 1 if options['overwrite'] else 0
options['timestamp'] = int((options['timestamp'] - datetime(2000, 1, 1)).total_seconds())
logger(__name__).debug('Initializing file transfer w/: {}'.format(options))
tx_payload = struct.pack("<4B3I4s2I24s", options['function'], options['target'], options['vid'],
options['options'], options['length'], options['addr'], options['crc'],
options['type'], options['timestamp'], options['version'], options['name'])
rx = self._txrx_ext_struct(0x11, tx_payload, "<H2I", timeout=kwargs.get('timeout', self.default_timeout * 5))
rx = dict(zip(['max_packet_size', 'file_size', 'crc'], rx))
logger(__name__).debug('response: {}'.format(rx))
logger(__name__).debug('Completed ext 0x11 command')
return rx
@retries
def ft_complete(self, options: FTCompleteOptions = FTCompleteOptions.DONT_RUN):
logger(__name__).debug('Sending ext 0x12 command')
if isinstance(options, bool):
options = self.FTCompleteOptions.RUN_IMMEDIATELY if options else self.FTCompleteOptions.DONT_RUN
tx_payload = struct.pack("<B", options.value)
ret = self._txrx_ext_packet(0x12, tx_payload, 0, timeout=self.default_timeout * 10)
logger(__name__).debug('Completed ext 0x12 command')
return ret
@retries
def ft_write(self, addr: int, payload: Union[Iterable, bytes, bytearray, str]):
logger(__name__).debug('Sending ext 0x13 command')
if isinstance(payload, str):
payload = payload.encode(encoding='ascii')
if len(payload) % 4 != 0:
padded_payload = bytes([*payload, *([0] * (4 - (len(payload) % 4)))])
else:
padded_payload = payload
tx_fmt = "<I{}s".format(len(padded_payload))
tx_payload = struct.pack(tx_fmt, addr, padded_payload)
ret = self._txrx_ext_packet(0x13, tx_payload, 0)
logger(__name__).debug('Completed ext 0x13 command')
return ret
@retries
def ft_read(self, addr: int, n_bytes: int) -> bytearray:
logger(__name__).debug('Sending ext 0x14 command')
actual_n_bytes = n_bytes + (0 if n_bytes % 4 == 0 else 4 - n_bytes % 4)
ui.logger(__name__).debug(dict(actual_n_bytes=actual_n_bytes, addr=addr))
tx_payload = struct.pack("<IH", addr, actual_n_bytes)
rx_fmt = "<I{}s".format(actual_n_bytes)
ret = self._txrx_ext_struct(0x14, tx_payload, rx_fmt, check_ack=False)[1][:n_bytes]
logger(__name__).debug('Completed ext 0x14 command')
return ret
@retries
def ft_set_link(self, link_name: str, vid: int_str = 'user', options: int = 0):
logger(__name__).debug('Sending ext 0x15 command')
if isinstance(vid, str):
vid = self.vid_map[vid.lower()]
if isinstance(link_name, str):
link_name = link_name.encode(encoding='ascii')
logger(__name__).debug(f'Linking current ft to {link_name} (vid={vid})')
tx_payload = struct.pack("<2B24s", vid, options, link_name)
ret = self._txrx_ext_packet(0x15, tx_payload, 0)
logger(__name__).debug('Completed ext 0x15 command')
return ret
@retries
def get_dir_count(self, vid: int_str = 1, options: int = 0) \
-> int:
logger(__name__).debug('Sending ext 0x16 command')
if isinstance(vid, str):
vid = self.vid_map[vid.lower()]
tx_payload = struct.pack("<2B", vid, options)
ret = self._txrx_ext_struct(0x16, tx_payload, "<h")[0]
logger(__name__).debug('Completed ext 0x16 command')
return ret
@retries
def get_file_metadata_by_idx(self, file_idx: int, options: int = 0) \
-> Dict[str, Any]:
logger(__name__).debug('Sending ext 0x17 command')
tx_payload = struct.pack("<2B", file_idx, options)
rx = self._txrx_ext_struct(0x17, tx_payload, "<B3L4sLL24s")
rx = dict(zip(['idx', 'size', 'addr', 'crc', 'type', 'timestamp', 'version', 'filename'], rx))
rx['type'] = decode_bytes_to_str(rx['type'])
rx['timestamp'] = datetime(2000, 1, 1) + timedelta(seconds=rx['timestamp'])
rx['filename'] = decode_bytes_to_str(rx['filename'])
logger(__name__).debug('Completed ext 0x17 command')
return rx
@retries
def execute_program_file(self, file_name: str, vid: int_str = 'user', run: bool = True):
logger(__name__).debug('Sending ext 0x18 command')
if isinstance(vid, str):
vid = self.vid_map[vid.lower()]
options = 0
options |= (0 if run else 0x80)
logger(__name__).debug('VID: {}\tOptions: {}\tFile name: {}\tRun: {}'.format(vid, options, file_name, run))
tx_payload = struct.pack("<2B24s", vid, options, file_name.encode(encoding='ascii'))
ret = self._txrx_ext_packet(0x18, tx_payload, 0)
logger(__name__).debug('Completed ext 0x18 command')
return ret
@retries
def get_file_metadata_by_name(self, file_name: str, vid: int_str = 1, options: int = 0) \
-> Dict[str, Any]:
logger(__name__).debug('Sending ext 0x19 command')
if isinstance(vid, str):
vid = self.vid_map[vid.lower()]
ui.logger(__name__).debug(f'Options: {dict(vid=vid, file_name=file_name)}')
tx_payload = struct.pack("<2B24s", vid, options, file_name.encode(encoding='ascii'))
rx = self._txrx_ext_struct(0x19, tx_payload, "<B3L4sLL24s")
rx = dict(zip(['linked_vid', 'size', 'addr', 'crc', 'type', 'timestamp', 'version', 'linked_filename'], rx))
logger(__name__).debug(rx)
rx['type'] = decode_bytes_to_str(rx['type'])
rx['timestamp'] = datetime(2000, 1, 1) + timedelta(seconds=rx['timestamp'])
rx['linked_filename'] = decode_bytes_to_str(rx['linked_filename'])
logger(__name__).debug('Completed ext 0x19 command')
return rx
@retries
def set_program_file_metadata(self, file_name: str, **kwargs):
logger(__name__).debug('Sending ext 0x1A command')
options = {
'vid': 'user',
'options': 0,
'addr': 0xff_ff_ff_ff,
'type': b'\xff\xff\xff\xff',
'timestamp': 0xff_ff_ff_ff,
'version': 0xff_ff_ff_ff
} # Dict[str, Any]
options.update(**kwargs)
if isinstance(options['vid'], str):
options['vid'] = self.vid_map[options['vid'].lower()]
if isinstance(options['timestamp'], datetime):
assert (isinstance(options['timestamp'], datetime))
options['timestamp'] = int((options['timestamp'] - datetime(2000, 1, 1)).total_seconds())
if isinstance(options['type'], str):
options['type'] = options['type'].encode(encoding='ascii')
tx_payload = struct.pack("<2BI4s2I24s", options['vid'], options['options'],
options['addr'], options['type'], options['timestamp'],
options['version'], file_name.encode(encoding='ascii'))
ret = self._txrx_ext_packet(0x1A, tx_payload, 0)
logger(__name__).debug('Completed ext 0x1A command')
return ret
@retries
def erase_file(self, file_name: str, erase_all: bool = False, vid: int_str = 'user'):
logger(__name__).debug('Sending ext 0x1B command')
if isinstance(vid, str):
vid = self.vid_map[vid.lower()]
options = 0
options |= (0x80 if erase_all else 0)
tx_payload = struct.pack('<2B24s', vid, options, file_name.encode(encoding='ascii'))
recv = self._txrx_ext_packet(0x1B, tx_payload, 0)
self.ft_complete()
logger(__name__).debug('Completed ext 0x1B command')
return recv
@retries
def get_program_file_slot(self, file_name: str, vid: int = 1, options: int = 0) \
-> Dict[str, Any]:
logger(__name__).debug('Sending ext 0x1C command')
tx_payload = struct.pack("<2B24s", vid, options, file_name.encode(encoding='ascii'))
ret = self._txrx_ext_struct(0x1C, tx_payload, "<B")[0]
logger(__name__).debug('Completed ext 0x1C command')
return ret
@retries
def get_device_status(self):
raise NotImplementedError()
@retries
def get_system_status(self) -> SystemStatus:
from semantic_version import Version
logger(__name__).debug('Sending ext 0x22 command')
version = self.query_system_version()
if (version.product == V5Device.SystemVersion.Product.BRAIN and version.system_version in Spec('<1.0.13')) or \
(version.product == V5Device.SystemVersion.Product.CONTROLLER and version.system_version in Spec('<1.0.0-0.70')):
schema = '<x12B3xBI12x'
else:
schema = '<x12B3xBI12xB3x'
rx = self._txrx_ext_struct(0x22, [], schema)
logger(__name__).debug('Completed ext 0x22 command')
return V5Device.SystemStatus(rx)
@retries
def user_fifo_read(self) -> bytes:
# I can't really think of a better way to only return when a full
# COBS message was written than to just cache the data until we hit a \x00.
# read/write are the same command, behavior dictated by specifying
# length-to-read as 0xFF and providing additional payload bytes to write or
# specifying a length-to-read and no additional data to read.
logger(__name__).debug('Sending ext 0x27 command (read)')
# specifying a length to read (0x40 bytes) with no additional payload data.
tx_payload = struct.pack("<2B", self.channel_map['download'], 0x40)
# RX length isn't always 0x40 (end of buffer reached), so don't check_length.
self._serial_cache += self._txrx_ext_packet(0x27, tx_payload, 0, check_length=False)[1:]
logger(__name__).debug('Completed ext 0x27 command (read)')
# if _serial_cache doesn't have a \x00, pretend we didn't read anything.
if b'\x00' not in self._serial_cache:
return b''
# _serial_cache has a \x00, split off the beginning part and hand it down.
parts = self._serial_cache.split(b'\x00')
ret = parts[0] + b'\x00'
self._serial_cache = b'\x00'.join(parts[1:])
return ret
@retries
def user_fifo_write(self, payload: Union[Iterable, bytes, bytearray, str]):
# Not currently implemented
return
logger(__name__).debug('Sending ext 0x27 command (write)')
max_packet_size = 224
pl_len = len(payload)
for i in range(0, pl_len, max_packet_size):
packet_size = max_packet_size
if i + max_packet_size > pl_len:
packet_size = pl_len - i
logger(__name__).debug(f'Writing {packet_size} bytes to user FIFO')
self._txrx_ext_packet(0x27, b'\x01\x00' + payload[i:i + packet_size], 0, check_length=False)
logger(__name__).debug('Completed ext 0x27 command (write)')
@retries
def sc_init(self) -> None:
"""
Send command to initialize screen capture
"""
# This will only copy data in memory, not send!
logger(__name__).debug('Sending ext 0x28 command')
self._txrx_ext_struct(0x28, [], '')
logger(__name__).debug('Completed ext 0x28 command')
@retries
def kv_read(self, kv: str) -> bytearray:
logger(__name__).debug('Sending ext 0x2e command')
encoded_kv = f'{kv}\0'.encode(encoding='ascii')
tx_payload = struct.pack(f'<{len(encoded_kv)}s', encoded_kv)
# The length of the kernel variable's value is not known in advance, so receive with check_length=False.
ret = self._txrx_ext_packet(0x2e, tx_payload, 1, check_length=False, check_ack=True)
logger(__name__).debug('Completed ext 0x2e command')
return ret
@retries
def kv_write(self, kv: str, payload: Union[Iterable, bytes, bytearray, str]):
logger(__name__).debug('Sending ext 0x2f command')
encoded_kv = f'{kv}\0'.encode(encoding='ascii')
kv_to_max_bytes = {
'teamnumber': 7,
'robotname': 16
}
if len(payload) > kv_to_max_bytes.get(kv, 254):
print(f'Truncating input to meet maximum value length ({kv_to_max_bytes.get(kv, 254)} characters).')
# Trim down size of payload to fit within the 255 byte limit and add null terminator.
payload = payload[:kv_to_max_bytes.get(kv, 254)] + "\0"
if isinstance(payload, str):
payload = payload.encode(encoding='ascii')
tx_fmt = f'<{len(encoded_kv)}s{len(payload)}s'
tx_payload = struct.pack(tx_fmt, encoded_kv, payload)
ret = self._txrx_ext_packet(0x2f, tx_payload, 1, check_length=False, check_ack=True)
logger(__name__).debug('Completed ext 0x2f command')
return payload
def _txrx_ext_struct(self, command: int, tx_data: Union[Iterable, bytes, bytearray],
unpack_fmt: str, check_length: bool = True, check_ack: bool = True,
timeout: Optional[float] = None) -> Tuple:
"""
Transmits and receives an extended command to the V5, automatically unpacking the values according to unpack_fmt
which gets passed into struct.unpack. The size of the payload is determined from the fmt string
:param command: Extended command code
:param tx_data: Transmission payload
:param unpack_fmt: Format to expect the raw payload to be in
:param check_length: If true, checks that the received payload length matches the unpack format
:param timeout: Optional timeout override for this transaction
:param check_ack: If true, then checks the first byte of the extended payload as an ACK byte
:return: A tuple unpacked according to the unpack_fmt
"""
rx = self._txrx_ext_packet(command, tx_data, struct.calcsize(unpack_fmt),
check_length=check_length, check_ack=check_ack, timeout=timeout)
logger(__name__).debug('Unpacking with format: {}'.format(unpack_fmt))
return struct.unpack(unpack_fmt, rx)
@classmethod
def _rx_ext_packet(cls, msg: Message, command: int, rx_length: int, check_ack: bool = True,
check_length: bool = True) -> Message:
"""
Parse a received packet
:param msg: data to parse
:param command: The extended command sent
:param rx_length: Expected length of the received data
:param check_ack: If true, checks the first byte as an ACK byte
:param check_length: If true, validates the received payload length against rx_length
:return: The payload of the extended message
"""
assert (msg['command'] == 0x56)
if not cls.VEX_CRC16.compute(msg.rx) == 0:
raise VEXCommError("CRC of message didn't match 0: {}".format(cls.VEX_CRC16.compute(msg.rx)), msg)
assert (msg['payload'][0] == command)
msg = msg['payload'][1:-2]
if check_ack:
nacks = {
0xFF: "General NACK",
0xCE: "CRC error on recv'd packet",
0xD0: "Payload too small",
0xD1: "Request transfer size too large",
0xD2: "Program CRC error",
0xD3: "Program file error",
0xD4: "Attempted to download/upload uninitialized",
0xD5: "Initialization invalid for this function",
0xD6: "Data not a multiple of 4 bytes",
0xD7: "Packet address does not match expected",
0xD8: "Data downloaded does not match initial length",
0xD9: "Directory entry does not exist",
0xDA: "Max user files, no more room for another user program",
0xDB: "User file exists"
}
if msg[0] in nacks.keys():
raise VEXCommError("Device NACK'd with reason: {}".format(nacks[msg[0]]), msg)
elif msg[0] != cls.ACK_BYTE:
raise VEXCommError("Device didn't ACK", msg)
msg = msg[1:]
if len(msg) > 0:
logger(cls).debug('Set msg window to {}'.format(bytes_to_str(msg)))
if len(msg) < rx_length and check_length:
raise VEXCommError(f'Received length is less than {rx_length} (got {len(msg)}).', msg)
elif len(msg) > rx_length and check_length:
ui.echo(f'WARNING: Received length is more than {rx_length} (got {len(msg)}). Consider upgrading the PROS CLI (current version: {get_version()}).')
return msg
def _txrx_ext_packet(self, command: int, tx_data: Union[Iterable, bytes, bytearray],
rx_length: int, check_length: bool = True,
check_ack: bool = True, timeout: Optional[float] = None) -> Message:
"""
Transmits and receives an extended command to the V5.
:param command: Extended command code
:param tx_data: Transmission payload
:param rx_length: Expected length of the received extended payload
:param check_length: If true, validates the received payload length against rx_length
:param check_ack: If true, then checks the first byte of the extended payload as an ACK byte
:return: A bytearray of the extended payload
"""
tx_payload = self._form_extended_payload(command, tx_data)
rx = self._txrx_packet(0x56, tx_data=tx_payload, timeout=timeout)
return self._rx_ext_packet(rx, command, rx_length, check_ack=check_ack, check_length=check_length)
@classmethod
def _form_extended_payload(cls, msg: int, payload: Union[Iterable, bytes, bytearray]) -> bytearray:
if payload is None:
payload = bytearray()
payload_length = len(payload)
assert payload_length <= 0x7f_ff
if payload_length >= 0x80:
payload_length = [(payload_length >> 8) | 0x80, payload_length & 0xff]
else:
payload_length = [payload_length]
packet = bytearray([msg, *payload_length, *payload])
crc = cls.VEX_CRC16.compute(bytes([*cls._form_simple_packet(0x56), *packet]))
packet = bytearray([*packet, crc >> 8, crc & 0xff])
assert (cls.VEX_CRC16.compute(bytes([*cls._form_simple_packet(0x56), *packet])) == 0)
return packet
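# Editor's addition (hedged illustration, not part of the original module): the extended-command
# frame built by _form_extended_payload above is laid out as
#   [command, length (1-2 bytes), payload..., CRC16 high, CRC16 low]
# where the length field grows to two bytes (high byte OR'd with 0x80) once the payload reaches
# 0x80 bytes, and the CRC-16/CCITT is computed over the simple packet header plus this frame so
# that a receiver verifying the whole message (see _rx_ext_packet) gets a remainder of zero.
# The helper below only encodes the variable-length field; its name is hypothetical.
def _encode_ext_payload_length(n: int) -> bytes:
    assert 0 <= n <= 0x7fff
    if n >= 0x80:
        return bytes([(n >> 8) | 0x80, n & 0xff])
    return bytes([n])

# Examples: _encode_ext_payload_length(0x7f) == b'\x7f'
#           _encode_ext_payload_length(0x123) == b'\x81\x23'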
|
mpl-2.0
|
5ef9d59d95974def2a0f2e8b64faf1d3
| 48.214286
| 151
| 0.575919
| 3.839759
| false
| false
| false
| false
|
eregs/regulations-parser
|
tests/notice_sxs_tests.py
|
3
|
23353
|
# vim: set encoding=utf-8
from unittest import TestCase
from lxml import etree
from regparser.notice import sxs
class NoticeSxsTests(TestCase):
def test_find_page(self):
xml = """<ROOT>
<P />
Text
<P />
<PRTPAGE P="333" />
<P />
<PRTPAGE />
<P />
<PRTPAGE P="334" />
</ROOT>"""
xml = etree.fromstring(xml)
for l in range(0, 6):
self.assertEqual(332, sxs.find_page(xml, l, 332))
for l in range(6, 10):
self.assertEqual(333, sxs.find_page(xml, l, 332))
for l in range(10, 15):
self.assertEqual(334, sxs.find_page(xml, l, 332))
def test_find_section_by_section(self):
sxs_xml = """
<HD SOURCE="HD2">Sub Section</HD>
<P>Content</P>
<HD SOURCE="HD3">Sub sub section</HD>
<EXTRACT><P>This is in an extract</P></EXTRACT>
<P>Sub Sub Content</P>"""
full_xml = """
<ROOT>
<SUPLINF>
<HD SOURCE="HED">Supplementary Info</HD>
<HD SOURCE="HD1">Stuff Here</HD>
<P>Some Content</P>
<HD SOURCE="HD1">X. Section-by-Section Analysis</HD>
{0}
<HD SOURCE="HD1">Section that follows</HD>
<P>Following Content</P>
</SUPLINF>
</ROOT>""".format(sxs_xml)
# Must use text field since the nodes are not directly comparable
sxs_texts = ['Sub Section', 'Content', 'Sub sub section',
'This is in an extract', 'Sub Sub Content']
computed = sxs.find_section_by_section(etree.fromstring(full_xml))
self.assertEqual(sxs_texts, [el.text for el in computed])
def test_find_section_by_section_intro_text(self):
sxs_xml = """
<P>Some intro text</P>
<P>This text includes a reference to Section 8675.309(a)</P>
<HD SOURCE="HD2">Section 8675.309 Stuff</HD>
<P>Content</P>"""
full_xml = """
<ROOT>
<SUPLINF>
<HD SOURCE="HED">Supplementary Info</HD>
<HD SOURCE="HD1">Stuff Here</HD>
<P>Some Content</P>
<HD SOURCE="HD1">X. Section-by-Section Analysis</HD>
{0}
<HD SOURCE="HD1">Section that follows</HD>
<P>Following Content</P>
</SUPLINF>
</ROOT>""".format(sxs_xml)
sxs_texts = ['Section 8675.309 Stuff', 'Content']
computed = sxs.find_section_by_section(etree.fromstring(full_xml))
self.assertEqual(sxs_texts, [el.text for el in computed])
def test_find_section_by_section_not_present(self):
full_xml = """
<ROOT>
<SUPLINF>
<HD SOURCE="HED">Supplementary Info</HD>
<HD SOURCE="HD1">This is not sxs Analysis</HD>
<P>Stuff</P>
<P>Stuff2</P>
<FTNT>Foot Note</FTNT>
</SUPLINF>
</ROOT>"""
self.assertEqual([], sxs.find_section_by_section(etree.fromstring(
full_xml)))
def test_build_section_by_section(self):
xml = """
<ROOT>
<HD SOURCE="HD3">Section Header</HD>
<P>Content 1</P>
<P>Content 2</P>
<HD SOURCE="HD4">Sub Section Header</HD>
<P>Content 3</P>
<HD SOURCE="HD4">Another</HD>
<P>Content 4</P>
<HD SOURCE="HD3">4(b) Header</HD>
<P>Content 5</P>
<FP>Content 6</FP>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 83, '100')
self.assertEqual(2, len(structures))
self.assertEqual(structures[0], {
'title': 'Section Header',
'paragraphs': [
'Content 1',
'Content 2'
],
'footnote_refs': [],
'children': [
{
'title': 'Sub Section Header',
'paragraphs': ['Content 3'],
'children': [],
'footnote_refs': [],
'page': 83
},
{
'title': 'Another',
'paragraphs': ['Content 4'],
'children': [],
'footnote_refs': [],
'page': 83
}],
'page': 83
})
self.assertEqual(structures[1], {
'title': '4(b) Header',
'paragraphs': ['Content 5', 'Content 6'],
'labels': ['100-4-b'],
'page': 83,
'footnote_refs': [],
'children': []
})
def test_build_section_by_section_footnotes(self):
"""We only account for paragraph tags right now"""
xml = """
<ROOT>
<HD SOURCE="HD3">Section Header</HD>
<P>Content 1</P>
<FTNT>Content A</FTNT>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 21, '100')
self.assertEqual(1, len(structures))
self.assertEqual(structures[0], {
'title': 'Section Header',
'paragraphs': [
'Content 1',
'Content 2',
],
'children': [],
'footnote_refs': [],
'page': 21
})
def test_build_section_by_section_label(self):
"""Check that labels are being added correctly"""
xml = """
<ROOT>
<HD SOURCE="HD2">Section 99.3 Info</HD>
<P>Content 1</P>
<HD SOURCE="HD3">3(q)(4) More Info</HD>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 2323, '99')
self.assertEqual(1, len(structures))
self.assertEqual(structures[0], {
'title': 'Section 99.3 Info',
'labels': ['99-3'],
'paragraphs': ['Content 1'],
'page': 2323,
'footnote_refs': [],
'children': [{
'title': '3(q)(4) More Info',
'labels': ['99-3-q-4'],
'paragraphs': ['Content 2'],
'page': 2323,
'footnote_refs': [],
'children': []
}]
})
def test_build_section_by_section_extra_tags(self):
"""Check that labels are being added correctly"""
xml = """
<ROOT>
<HD SOURCE="HD2">Section 99.3 Info</HD>
<P>Content<PRTPAGE P="50249"/>1</P>
<P>Content <SU>99</SU><FTREF />2</P>
<P>Content <E T="03">Emph</E></P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 939, '99')
self.assertEqual(1, len(structures))
self.assertEqual(structures[0], {
'title': 'Section 99.3 Info',
'labels': ['99-3'],
'page': 939,
'paragraphs': ['Content 1', 'Content 2',
'Content <em data-original="E-03">Emph</em>'],
'footnote_refs': [{'paragraph': 1,
'reference': '99',
'offset': 8}],
'children': []
})
def test_build_section_by_section_same_level(self):
"""Check that labels are being added correctly"""
xml = """
<ROOT>
<HD SOURCE="HD2">Section 99.3 Something Here</HD>
<HD SOURCE="HD3">3(q)(4) More Info</HD>
<P>Content 1</P>
<HD SOURCE="HD3">Subheader, Really</HD>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 765, '99')
self.assertEqual(1, len(structures))
self.assertEqual(structures[0], {
'title': 'Section 99.3 Something Here',
'labels': ['99-3'],
'paragraphs': [],
'page': 765,
'footnote_refs': [],
'children': [{
'title': '3(q)(4) More Info',
'labels': ['99-3-q-4'],
'paragraphs': ['Content 1'],
'page': 765,
'footnote_refs': [],
'children': [{
'title': 'Subheader, Really',
'paragraphs': ['Content 2'],
'footnote_refs': [],
'children': [],
'page': 765
}]
}]
})
def test_build_section_by_section_emphasis(self):
xml = """
<ROOT>
<HD SOURCE="H2">Section 876.23 Title Here</HD>
<P>This sentence has<E T="03">emphasis</E>!</P>
<P>Non emph,<E T="03">emph</E>then more.</P>
<P>This one has an <E T="03">emph</E> with spaces.</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
paragraphs = structures[0]['paragraphs']
self.assertEqual(paragraphs, [
'This sentence has <em data-original="E-03">emphasis</em>!',
'Non emph, <em data-original="E-03">emph</em> then more.',
'This one has an <em data-original="E-03">emph</em> with spaces.'
])
def test_build_section_by_section_footnotes_full(self):
xml = """
<ROOT>
<HD SOURCE="H2">Section 876.23 Title Here</HD>
<P>Sometimes<E T="03">citations</E><SU>5</SU><FTREF /></P>
<P>Are rather complicated</P>
<FTNT><P><SU>5</SU>Footnote contents</P></FTNT>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
sometimes_txt = 'Sometimes <em data-original="E-03">citations</em>'
self.assertEqual(structures[0]['paragraphs'], [
sometimes_txt, 'Are rather complicated'
])
self.assertEqual(structures[0]['footnote_refs'],
[{'paragraph': 0,
'reference': '5',
'offset': len(sometimes_txt)}])
def test_build_section_by_section_multiple(self):
xml = """
<ROOT>
<HD SOURCE="H2">Comments 22(a)-5, 22(a)-6, and 22(b)</HD>
<P>Content</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
self.assertEqual(len(structures), 1)
self.assertEqual(structures[0]['labels'],
['876-22-a-Interp-5', '876-22-a-Interp-6',
'876-22-b-Interp'])
def test_build_section_by_section_repeat_label(self):
xml = """
<ROOT>
<HD SOURCE="H2">This references 23(c)</HD>
<P>Content 1</P>
<HD SOURCE="H3">SO DOES THIS! 23(c) continued</HD>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
self.assertEqual(len(structures), 1)
struct1 = structures[0]
self.assertEqual(struct1['labels'], ['876-23-c'])
self.assertEqual(['Content 1'], struct1['paragraphs'])
self.assertEqual(len(struct1['children']), 1)
struct2 = struct1['children'][0]
self.assertEqual(['Content 2'], struct2['paragraphs'])
self.assertFalse('labels' in struct2)
# Now the same, but on the same H level
xml = """
<ROOT>
<HD SOURCE="H2">This references 23(c)</HD>
<P>Content 1</P>
<HD SOURCE="H2">SO DOES THIS! 23(c) continued</HD>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
self.assertEqual(len(structures), 1)
struct1 = structures[0]
self.assertEqual(struct1['labels'], ['876-23-c'])
self.assertEqual(['Content 1'], struct1['paragraphs'])
self.assertEqual(len(struct1['children']), 1)
struct2 = struct1['children'][0]
self.assertEqual(['Content 2'], struct2['paragraphs'])
self.assertFalse('labels' in struct2)
# Semi-repeated
xml = """
<ROOT>
<HD SOURCE="H2">Appendices A and B</HD>
<P>Content 1</P>
<HD SOURCE="H2">Appendix B</HD>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
self.assertEqual(len(structures), 1)
struct1 = structures[0]
self.assertEqual(struct1['labels'], ['876-A', '876-B'])
self.assertEqual(['Content 1'], struct1['paragraphs'])
self.assertEqual(len(struct1['children']), 1)
struct2 = struct1['children'][0]
self.assertEqual(['Content 2'], struct2['paragraphs'])
self.assertFalse('labels' in struct2)
def test_build_section_by_section_backtrack(self):
xml = """
<ROOT>
<HD SOURCE="H2">This references 23(c)(3)</HD>
<P>Content 1</P>
<HD SOURCE="H2">Off handed comment about 23(c)</HD>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
self.assertEqual(len(structures), 1)
struct1 = structures[0]
self.assertEqual(struct1['labels'], ['876-23-c-3'])
self.assertEqual(['Content 1'], struct1['paragraphs'])
self.assertEqual(len(struct1['children']), 1)
struct2 = struct1['children'][0]
self.assertEqual(['Content 2'], struct2['paragraphs'])
self.assertFalse('labels' in struct2)
# Same, but deeper H level
xml = """
<ROOT>
<HD SOURCE="H2">This references 23(c)(3)</HD>
<P>Content 1</P>
<HD SOURCE="H3">Off handed comment about 23(c)</HD>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
self.assertEqual(len(structures), 1)
struct1 = structures[0]
self.assertEqual(struct1['labels'], ['876-23-c-3'])
self.assertEqual(['Content 1'], struct1['paragraphs'])
self.assertEqual(len(struct1['children']), 1)
struct2 = struct1['children'][0]
self.assertEqual(['Content 2'], struct2['paragraphs'])
self.assertFalse('labels' in struct2)
# No part then part
xml = """
<ROOT>
<HD SOURCE="H3">This references 23(c)</HD>
<HD SOURCE="H3">Off handed comment about section 1111.23</HD>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 22, '1111')
self.assertEqual(len(structures), 1)
struct1 = structures[0]
self.assertEqual(struct1['labels'], ['1111-23-c'])
self.assertEqual([], struct1['paragraphs'])
self.assertEqual(len(struct1['children']), 1)
struct2 = struct1['children'][0]
self.assertEqual(['Content 2'], struct2['paragraphs'])
self.assertFalse('labels' in struct2)
def test_build_section_by_section_different_part(self):
xml = """
<ROOT>
<HD SOURCE="H2">This references Section 1111.23(c)(3)</HD>
<P>Content 1</P>
<HD SOURCE="H2">This one's about 24(c)</HD>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
self.assertEqual(len(structures), 2)
struct1, struct2 = structures
self.assertEqual(struct1['labels'], ['1111-23-c-3'])
self.assertEqual(['Content 1'], struct1['paragraphs'])
self.assertEqual(len(struct1['children']), 0)
self.assertEqual(struct2['labels'], ['1111-24-c'])
self.assertEqual(['Content 2'], struct2['paragraphs'])
self.assertEqual(len(struct2['children']), 0)
# Same, but deeper H level
xml = """
<ROOT>
<HD SOURCE="H2">This references 23(c)(3)</HD>
<P>Content 1</P>
<HD SOURCE="H3">Off handed comment about 23(c)</HD>
<P>Content 2</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
self.assertEqual(len(structures), 1)
struct1 = structures[0]
self.assertEqual(struct1['labels'], ['876-23-c-3'])
self.assertEqual(['Content 1'], struct1['paragraphs'])
self.assertEqual(len(struct1['children']), 1)
struct2 = struct1['children'][0]
self.assertEqual(['Content 2'], struct2['paragraphs'])
self.assertFalse('labels' in struct2)
def test_build_section_by_section_dup_child(self):
xml = """
<ROOT>
<HD SOURCE="H2">References 31(a) and (b)</HD>
<P>Content 1</P>
<HD SOURCE="H3">Subcontent</HD>
<P>Content 2</P>
<HD SOURCE="H3">References 31(b)(1)</HD>
<P>Content 3</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
structures = sxs.build_section_by_section(sxs_lst, 23, '876')
self.assertEqual(len(structures), 1)
struct1 = structures[0]
self.assertEqual(struct1['labels'], ['876-31-a', '876-31-b'])
self.assertEqual(['Content 1'], struct1['paragraphs'])
self.assertEqual(len(struct1['children']), 2)
struct1_h, struct2 = struct1['children']
self.assertEqual(struct1_h['title'], 'Subcontent')
self.assertEqual(['Content 2'], struct1_h['paragraphs'])
self.assertEqual(len(struct1_h['children']), 0)
self.assertEqual(struct2['labels'], ['876-31-b-1'])
self.assertEqual(['Content 3'], struct2['paragraphs'])
self.assertEqual(len(struct2['children']), 0)
def test_split_into_ttsr(self):
xml = """
<ROOT>
<HD SOURCE="HD3">Section Header</HD>
<P>Content 1</P>
<P>Content 2</P>
<HD SOURCE="HD4">Sub Section Header</HD>
<P>Content 3</P>
<HD SOURCE="HD4">Another</HD>
<P>Content 4</P>
<HD SOURCE="HD3">Next Section</HD>
<P>Content 5</P>
</ROOT>"""
sxs_lst = list(etree.fromstring(xml).xpath("/ROOT/*"))
title, text_els, sub_sects, remaining = sxs.split_into_ttsr(sxs_lst,
'1111')
self.assertEqual("Section Header", title.text)
self.assertEqual(2, len(text_els))
self.assertEqual("Content 1", text_els[0].text)
self.assertEqual("Content 2", text_els[1].text)
self.assertEqual(4, len(sub_sects))
self.assertEqual("Sub Section Header", sub_sects[0].text)
self.assertEqual("Content 3", sub_sects[1].text)
self.assertEqual("Another", sub_sects[2].text)
self.assertEqual("Content 4", sub_sects[3].text)
self.assertEqual(2, len(remaining))
self.assertEqual("Next Section", remaining[0].text)
self.assertEqual("Content 5", remaining[1].text)
def test_add_spaces_to_title(self):
"""Account for wonky titles without proper spacing"""
self.assertEqual('Section 101.23 Some Title',
sxs.add_spaces_to_title('Section 101.23 Some Title'))
self.assertEqual('Section 101.23 Some Title',
sxs.add_spaces_to_title('Section 101.23Some Title'))
self.assertEqual('Section 101.23:Some Title',
sxs.add_spaces_to_title('Section 101.23:Some Title'))
self.assertEqual('Appendix A-Some Title',
sxs.add_spaces_to_title('Appendix A-Some Title'))
self.assertEqual(
'Comment 29(b)(1)-1 Some Title',
sxs.add_spaces_to_title('Comment 29(b)(1)-1Some Title'))
def test_parse_into_labels(self):
self.assertEqual(["101-22"],
sxs.parse_into_labels("Section 101.22Stuff", "101"))
self.assertEqual(["101-22-d"],
sxs.parse_into_labels("22(d) Content", "101"))
self.assertEqual(["101-22-d-5"],
sxs.parse_into_labels("22(d)(5) Content", "101"))
self.assertEqual(["101-22-d-5-x"],
sxs.parse_into_labels("22(d)(5)(x) Content", "101"))
self.assertEqual(
["101-22-d-5-x"],
sxs.parse_into_labels(u"§ 101.22(d)(5)(x) Content", "101"))
self.assertEqual(
["101-22-d-5-x-Q"],
sxs.parse_into_labels("22(d)(5)(x)(Q) Content", "101"))
self.assertEqual(["101-A"],
sxs.parse_into_labels("Appendix A Heading", "101"))
self.assertEqual(
["101-21-c-Interp-1"],
sxs.parse_into_labels("Comment 21(c)-1 Heading", "101"))
text = u'Official Interpretations of § 101.33(c)(2)'
self.assertEqual(['101-33-c-2-Interp'],
sxs.parse_into_labels(text, '101'))
text = 'Comments 33(a)-8 and 33(a)-9'
self.assertEqual(['101-33-a-Interp-8', '101-33-a-Interp-9'],
sxs.parse_into_labels(text, '101'))
self.assertEqual(
[],
sxs.parse_into_labels("Application of this rule", "101"))
text = 'Section 1111.39Content content 1111.39(d) Exeptions'
self.assertEqual(['1111-39', '1111-39-d'],
sxs.parse_into_labels(text, '101'))
text = u"Appendix H—Closed-End Model Forms and Clauses-7(i)"
self.assertEqual(['101-H'], sxs.parse_into_labels(text, '101'))
def test_is_child_of(self):
parent = """<HD SOURCE="H2">Section 22.1</HD>"""
parent = etree.fromstring(parent)
child = """<P>Something</P>"""
self.assertTrue(
sxs.is_child_of(etree.fromstring(child), parent, '1111'))
child = """<HD SOURCE="H3">Something</HD>"""
self.assertTrue(
sxs.is_child_of(etree.fromstring(child), parent, '1111'))
child = """<HD SOURCE="H1">Section 22.2</HD>"""
self.assertFalse(
sxs.is_child_of(etree.fromstring(child), parent, '1111'))
child = """<HD SOURCE="H2">Header without Citation</HD>"""
self.assertTrue(
sxs.is_child_of(etree.fromstring(child), parent, '1111'))
|
cc0-1.0
|
3de1251fd72b47e365b48aaa04e02e59
| 39.326425
| 78
| 0.511671
| 3.677008
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/tree/struct.py
|
3
|
11524
|
import hashlib
import re
from json import JSONEncoder
import six
from lxml import etree
from regparser.tree.depth.markers import MARKERLESS
class Node(object):
APPENDIX = u'appendix'
INTERP = u'interp'
REGTEXT = u'regtext'
SUBPART = u'subpart'
EMPTYPART = u'emptypart'
EXTRACT = u'extract'
NOTE = u'note'
INTERP_MARK = 'Interp'
MARKERLESS_REGEX = re.compile(r'p\d+')
def __init__(self, text='', children=None, label=None, title=None,
node_type=REGTEXT, source_xml=None, tagged_text=''):
if children is None:
children = []
if label is None:
label = []
self.text = six.text_type(text)
# defensive copy
self.children = list(children)
self.label = [str(l) for l in label if l != '']
title = six.text_type(title or '')
self.title = title or None
self.node_type = node_type
self.source_xml = source_xml
self.tagged_text = tagged_text
def __repr__(self):
text = ("Node(text={0}, children={1}, label={2}, title={3}, "
"node_type={4})")
return text.format(
repr(self.text), repr(self.children), repr(self.label),
repr(self.title), repr(self.node_type)
)
def __lt__(self, other):
return repr(self) < repr(other)
def __eq__(self, other):
return repr(self) == repr(other)
@property
def cfr_part(self):
if self.label:
return self.label[0]
def label_id(self):
return '-'.join(self.label)
def depth(self):
"""Inspect the label and type to determine the node's depth"""
second = (self.label[1:2] or [""])[0]
second_is_digit = second[:1].isdigit()
is_interp = self.INTERP_MARK in self.label
is_root = len(self.label) <= 1
if self.node_type in (self.SUBPART, self.EMPTYPART):
# Subparts all on the same level
return 2
elif not second_is_digit or is_root or is_interp:
return len(self.label)
else:
# Add one for the subpart level
return len(self.label) + 1
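    # A rough sketch of the resulting depths (labels below are invented for
    # illustration, not taken from real regulation data):
    #
    #     >>> Node(label=['1111', '22', 'a']).depth()   # paragraph in a section
    #     4
    #     >>> Node(label=['1111'], node_type=Node.SUBPART).depth()
    #     2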
@classmethod
def is_markerless_label(cls, label):
if not label:
return None
return (cls.MARKERLESS_REGEX.match(label[-1]) or
label[-1] == MARKERLESS)
def is_markerless(self):
return bool(self.is_markerless_label(self.label))
def is_section(self):
"""Sections are contained within subparts/subject groups. They are not
part of the appendix"""
return len(self.label) == 2 and self.label[1][:1].isdigit()
def walk(self, fn):
"""See walk(node, fn)"""
return walk(self, fn)
class NodeEncoder(JSONEncoder):
"""Custom JSON encoder to handle Node objects"""
def default(self, obj):
if isinstance(obj, Node):
fields = dict(obj.__dict__)
if obj.title is None:
del fields['title']
for field in ('tagged_text', 'source_xml', 'child_labels'):
if field in fields:
del fields[field]
return fields
return super(NodeEncoder, self).default(obj)
class FullNodeEncoder(JSONEncoder):
"""Encodes Nodes into JSON, not losing any of the fields"""
FIELDS = {'text', 'children', 'label', 'title', 'node_type', 'source_xml',
'tagged_text'}
def default(self, obj):
if isinstance(obj, Node):
result = {field: getattr(obj, field, None)
for field in self.FIELDS}
if obj.source_xml is not None:
result['source_xml'] = etree.tounicode(obj.source_xml)
return result
return super(FullNodeEncoder, self).default(obj)
def full_node_decode_hook(d):
"""Convert a JSON object into a full Node"""
if set(d.keys()) == FullNodeEncoder.FIELDS:
params = dict(d)
node = Node(**params)
if node.source_xml:
node.source_xml = etree.fromstring(node.source_xml)
return node
return d
def frozen_node_decode_hook(d):
"""Convert a JSON object into a FrozenNode"""
if set(d.keys()) == FullNodeEncoder.FIELDS:
params = dict(d)
del params['source_xml']
fresh = FrozenNode(**params)
return fresh.prototype()
return d
def walk(node, fn):
"""Perform fn for every node in the tree. Pre-order traversal. fn must
be a function that accepts a root node."""
result = fn(node)
if result is not None:
results = [result]
else:
results = []
for child in node.children:
results += walk(child, fn)
return results
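# A minimal sketch of walk() in use (node text and labels are invented):
#
#     >>> root = Node('root', label=['1111'],
#     ...             children=[Node('child', label=['1111', '22'])])
#     >>> walk(root, lambda n: n.label_id())
#     ['1111', '1111-22']
#
# Returning None from fn skips that node in the collected results.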
def filter_walk(node, fn):
"""Perform fn on the label for every node in the tree and return a
list of nodes on which the function returns truthy."""
return walk(node, lambda n: n if fn(n.label) else None)
def find_first(root, predicate):
"""Walk the tree and find the first node which matches the predicate"""
response = walk(root, lambda n: n if predicate(n) else None)
if response:
return response[0]
def find(root, label):
"""Search through the tree to find the node with this label."""
if isinstance(label, Node):
label = label.label_id()
return find_first(root, lambda n: n.label_id() == label)
def find_parent(root, label):
"""Search through the tree to find the _parent_ or a node with this
label."""
if isinstance(label, Node):
label = label.label_id()
def has_child(n):
return any(c.label_id() == label for c in n.children)
return find_first(root, has_child)
def merge_duplicates(nodes):
"""Given a list of nodes with the same-length label, merge any
duplicates (by combining their children)"""
found_pair = None
for lidx, lhs in enumerate(nodes):
for ridx, rhs in enumerate(nodes[lidx + 1:], lidx + 1):
if lhs.label == rhs.label:
found_pair = (lidx, ridx)
if found_pair:
lidx, ridx = found_pair
lhs, rhs = nodes[lidx], nodes[ridx]
lhs.children.extend(rhs.children)
return merge_duplicates(nodes[:ridx] + nodes[ridx + 1:])
else:
return nodes
def treeify(nodes):
"""Given a list of nodes, convert those nodes into the appropriate tree
structure based on their labels. This assumes that all nodes will fall
under a set of 'root' nodes, which have the min-length label."""
if not nodes:
return nodes
min_len, with_min = len(nodes[0].label), []
for node in nodes:
if len(node.label) == min_len:
with_min.append(node)
elif len(node.label) < min_len:
min_len = len(node.label)
with_min = [node]
with_min = merge_duplicates(with_min)
roots = []
for root in with_min:
label = root.label
if root.label[-1] == Node.INTERP_MARK:
label = root.label[:-1]
def is_child(node):
return node.label[:len(label)] == label
children = [n for n in nodes if n.label != root.label and is_child(n)]
root.children = root.children + treeify(children)
roots.append(root)
return roots
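# Illustrative example of treeify() (labels invented): a flat list containing
# a section and one of its paragraphs becomes a nested tree.
#
#     >>> flat = [Node(label=['1111', '22']), Node(label=['1111', '22', 'a'])]
#     >>> roots = treeify(flat)
#     >>> [root.label for root in roots]
#     [['1111', '22']]
#     >>> [child.label for child in roots[0].children]
#     [['1111', '22', 'a']]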
class FrozenNode(object):
"""Immutable interface for nodes. No guarantees about internal state."""
_pool = {} # collection of all FrozenNodes, keyed by hash
def __init__(self, text='', children=(), label=(), title='',
node_type=Node.REGTEXT, tagged_text=''):
self._text = text or ''
self._children = tuple(children)
self._label = tuple(label)
self._title = title or ''
self._node_type = node_type
self._tagged_text = tagged_text or ''
self._child_labels = tuple(c.label_id for c in self.children)
self._label_id = '-'.join(self.label)
self._hash = self._generate_hash()
if self.hash not in FrozenNode._pool:
FrozenNode._pool[self.hash] = self
@property
def text(self):
return self._text
@property
def children(self):
return self._children
@property
def label(self):
return self._label
@property
def title(self):
return self._title
@property
def node_type(self):
return self._node_type
@property
def tagged_text(self):
return self._tagged_text
@property
def hash(self):
return self._hash
@property
def label_id(self):
return self._label_id
@property
def child_labels(self):
return self._child_labels
def _generate_hash(self):
"""Called during instantiation. Digests all fields"""
hasher = hashlib.sha256()
hasher.update(self.text.encode('utf-8'))
hasher.update(self.tagged_text.encode('utf-8'))
hasher.update(self.title.encode('utf-8'))
hasher.update(self.label_id.encode('utf-8'))
hasher.update(self.node_type.encode('utf-8'))
for child in self.children:
hasher.update(child.hash.encode('utf-8'))
return hasher.hexdigest()
def __hash__(self):
"""As the hash property is already distinctive, re-use it"""
return hash(self.hash)
def __eq__(self, other):
"""We define equality as having the same fields except for children.
Instead of recursively inspecting them, we compare only their hash
(this is a Merkle tree)"""
return (other.__class__ == self.__class__ and
self.hash == other.hash and
# Compare the fields to limit the effect of hash collisions
self.text == other.text and
self.title == other.title and
self.node_type == other.node_type and
self.tagged_text == other.tagged_text and
self.label_id == other.label_id and
[c.hash for c in self.children] ==
[c.hash for c in other.children])
@staticmethod
def from_node(node):
"""Convert a struct.Node (or similar) into a struct.FrozenNode. This
also checks if this node has already been instantiated. If so, it
returns the instantiated version (i.e. only one of each identical node
exists in memory)"""
children = [FrozenNode.from_node(n) for n in node.children]
fresh = FrozenNode(text=node.text, children=children, label=node.label,
title=node.title or '', node_type=node.node_type,
tagged_text=node.tagged_text)
return fresh.prototype()
# @todo - seems like something we could implement via __new__?
def prototype(self):
"""When we instantiate a FrozenNode, we add it to _pool if we've not
seen an identical FrozenNode before. If we have, we want to work with
that previously seen version instead. This method returns the _first_
FrozenNode with identical fields"""
return FrozenNode._pool[self.hash] # note this may not be self
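    # Sketch of the pooling behavior (text/labels invented): constructing two
    # FrozenNodes with identical fields yields the same object once
    # prototype() is applied, as from_node() does.
    #
    #     >>> a = FrozenNode.from_node(Node('text', label=['1111']))
    #     >>> b = FrozenNode.from_node(Node('text', label=['1111']))
    #     >>> a is b
    #     True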
def clone(self, **kwargs):
"""Implement a namedtuple `_replace` style functionality, copying all
fields that aren't explicitly replaced."""
for field in ('text', 'children', 'label', 'title', 'node_type',
'tagged_text'):
kwargs[field] = kwargs.get(field, getattr(self, field))
fresh = FrozenNode(**kwargs)
return fresh.prototype()
|
cc0-1.0
|
3d461f627e42a8423a2502e6ccf84a40
| 31.370787
| 79
| 0.590073
| 3.909091
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/grammar/tokens.py
|
3
|
4697
|
""" Set of Tokens to be used when parsing.
@label is a list describing the depth of a paragraph/context. It follows:
[ Part, Subpart/Appendix/Interpretations, Section, p-level-1, p-level-2,
    p-level-3, p-level-4, p-level-5 ]
"""
import attr
import six
def uncertain_label(label_parts):
"""Convert a list of strings/Nones to a '-'-separated string with question
markers to replace the Nones. We use this format to indicate
uncertainty"""
return '-'.join(p or '?' for p in label_parts)
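# For example (values invented): uncertain_label(['1111', None, '22'])
# produces '1111-?-22'.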
def _none_str(value):
"""Shorthand for displaying a variable as a string or the text None"""
if value is None:
return 'None'
else:
return "'{0}'".format(value)
@attr.attrs(frozen=True)
class Token(object):
"""Base class for all tokens. Provides methods for pattern matching and
copying this token"""
def match(self, *types, **fields):
"""Pattern match. self must be one of the types provided (if they
were provided) and all of the fields must match (if fields were
provided). If a successful match, returns self"""
type_match = not types or any(isinstance(self, typ) for typ in types)
has_fields = not fields or all(hasattr(self, f) for f in fields)
fields_match = not has_fields or all(
getattr(self, f) == v for f, v in fields.items())
return type_match and has_fields and fields_match and self
@attr.attrs(slots=True, frozen=True)
class Verb(Token):
"""Represents what action is taking place to the paragraphs"""
verb = attr.attrib()
active = attr.attrib()
and_prefix = attr.attrib(default=False)
PUT = 'PUT'
POST = 'POST'
MOVE = 'MOVE'
DELETE = 'DELETE'
DESIGNATE = 'DESIGNATE'
RESERVE = 'RESERVE'
KEEP = 'KEEP'
INSERT = 'INSERT'
@attr.attrs(slots=True, frozen=True)
class Context(Token):
"""Represents a bit of context for the paragraphs. This gets compressed
with the paragraph tokens to define the full scope of a paragraph. To
complicate matters, sometimes what looks like a Context is actually the
entity which is being modified (i.e. a paragraph). If we are certain
that this is only context, (e.g. "In Subpart A"), use 'certain'"""
# replace with Nones
label = attr.attrib(convert=lambda label: [p or None for p in label])
certain = attr.attrib(default=False)
@attr.attrs(slots=True, frozen=True)
class Paragraph(Token):
"""Represents an entity which is being modified by the amendment. Label
is a way to locate this paragraph (though see the above note). We might
be modifying a field of a paragraph (e.g. intro text only, or title
only;) if so, set the `field` parameter."""
label = attr.attrib(default=attr.Factory(list))
field = attr.attrib(default=None)
TEXT_FIELD = 'text'
HEADING_FIELD = 'title'
KEYTERM_FIELD = 'heading'
@classmethod
def make(cls, label=None, field=None, part=None, sub=None, section=None,
paragraphs=None, paragraph=None, subpart=None, is_interp=None,
appendix=None):
"""label and field are the only "materialized" fields. Everything
other field becomes part of the label, offering a more legible API.
Particularly useful for writing tests"""
if sub is None and subpart:
if isinstance(subpart, six.string_types):
sub = 'Subpart:{0}'.format(subpart)
else:
sub = 'Subpart'
if sub is None and is_interp:
sub = 'Interpretations'
if sub is None and appendix:
sub = 'Appendix:' + appendix
if paragraph:
paragraphs = [paragraph]
if label is None:
label = [part, sub, section] + (paragraphs or [])
# replace with Nones
label = [p or None for p in label]
# Trim the right side of the list
while label and not label[-1]:
label.pop()
return cls(label, field)
def label_text(self):
"""Converts self.label into a string"""
label = uncertain_label(self.label)
if self.field:
label += '[{0}]'.format(self.field)
return label
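# A small sketch of the make() shorthand and label_text() (part/section values
# invented):
#
#     >>> p = Paragraph.make(part='1111', section='22', paragraphs=['a', '1'])
#     >>> p.label
#     ['1111', None, '22', 'a', '1']
#     >>> p.label_text()
#     '1111-?-22-a-1'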
@attr.attrs(slots=True, frozen=True)
class TokenList(Token):
"""Represents a sequence of other tokens, e.g. comma separated of
created via "through" """
tokens = attr.attrib()
def __iter__(self):
return iter(self.tokens)
@attr.attrs(slots=True, frozen=True)
class AndToken(Token):
"""The word 'and' can help us determine if a Context token should be a
Paragraph token. Note that 'and' might also trigger the creation of a
    TokenList, which takes precedence"""
|
cc0-1.0
|
aff4fcdf545c971ba132a1b99c981ce1
| 34.854962
| 78
| 0.642751
| 3.856322
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/notice/xml.py
|
2
|
18106
|
"""Functions for processing the xml associated with the Federal Register's
notices"""
import logging
import os
from collections import namedtuple
from datetime import date, datetime
import requests
from cached_property import cached_property
from lxml import etree
from six.moves.urllib.parse import urlparse
from regparser import regs_gov
from regparser.grammar.unified import notice_cfr_p
from regparser.history.delays import delays_in_sentence
from regparser.index.http_cache import http_client
from regparser.notice.amendments.fetch import fetch_amendments
from regparser.notice.citation import Citation
from regparser.notice.dates import fetch_dates
from regparser.tree.xml_parser.xml_wrapper import XMLWrapper
from regparser.web.settings import parser as settings
logger = logging.getLogger(__name__)
TitlePartsRef = namedtuple("TitlePartsRef", ["title", "parts"])
def add_children(el, children):
"""
Given an element and a list of children, recursively appends
children as EREGS_SUBAGENCY elements with the appropriate
attributes, and appends their children to them, etc.
:arg Element el: The XML element to add child elements to.
Should be either EREGS_AGENCY or
EREGS_SUBAGENCY.
:arg list children: dict objects containing the agency information.
Must have subagencies in `children` fields.
:rtype: XML Element
"""
for agency in children:
sub_el = etree.Element("EREGS_SUBAGENCY", **{
"name": str(agency["name"]),
"raw-name": str(agency["raw_name"]),
"agency-id": str(agency["id"])
})
add_children(sub_el, agency["children"])
el.append(sub_el)
return el
def _root_property(attrib, transform=None):
"""We add multiple attributes to the NoticeXML's root element. Account for
data transforms (e.g. to an integer)"""
def getter(self):
value = self.xml.attrib.get(attrib)
if transform and value is not None:
return transform(value)
return value
def setter(self, value):
self.xml.attrib[attrib] = str(value)
return property(getter, setter)
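# Usage sketch, mirroring the assignments near the bottom of NoticeXML: each
# call builds a property that reads/writes an attribute on the root element,
# optionally transformed on the way out (e.g. to int).
#
#     fr_volume = _root_property('fr-volume', int)
#     # notice_xml.fr_volume       reads int(root.attrib['fr-volume'])
#     # notice_xml.fr_volume = 79  writes root.attrib['fr-volume'] = '79'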
class NoticeXML(XMLWrapper):
"""Wrapper around a notice XML which provides quick access to the XML's
encoded data fields"""
def delays(self):
"""Pull out FRDelays found in the DATES tag"""
dates_str = "".join(p.text for p in self.xpath(
"(//DATES/P)|(//EFFDATE/P)") if p.text)
return [delay for sent in dates_str.split('.')
for delay in delays_in_sentence(sent)]
def _set_date_attr(self, date_type, value):
"""Modify the XML tree so that it contains meta data for a date
field. Accepts both strings and dates"""
dates_tag = self._find_or_create('DATES')
if isinstance(value, date):
value = value.isoformat()
if value is None:
value = ''
dates_tag.attrib["eregs-{0}-date".format(date_type)] = value
def derive_rins(self):
"""Extract regulatory id numbers from the XML (in the RINs tag)"""
xml_rins = self.xpath('//RIN')
for xml_rin in xml_rins:
rin = xml_rin.text.replace("RIN", "").strip()
yield rin
def derive_docket_ids(self):
"""Extract docket numbers from the XML (in the DEPDOC tag)"""
docket_ids = []
xml_did_els = self.xpath('//DEPDOC')
for xml_did_el in xml_did_els:
did_str = xml_did_el.text.replace("[", "").replace("]", "")
docket_ids.extend([d.strip() for d in did_str.split(";")])
return docket_ids
def set_agencies(self, agencies=None):
"""
SIDE EFFECTS: this operates on the XML of the NoticeXML itself as well
as returning some information.
Adds elements to the NoticeXML to reflect information about the
        agencies connected to the notice.
Looks for that information in a list of dicts passed in as
``agencies``, then adds it to the beginning of the XML as a set of
elements that will look something like this::
<EREGS_AGENCIES>
<EREGS_AGENCY name="x" agency-id="00" raw-name="X">
<EREGS_SUBAGENCY name="y" agency-id="01" raw-name="Y">
</EREGS_SUBAGENCY>
</EREGS_AGENCY>
</EREGS_AGENCIES>
:arg list agencies: dict objects containing agency information,
including ``id``, ``parent_id``, ``name``, and
``raw_name``.
:rtype: dict
:returns: A dict of ``id``: ``defaultdict``, where the id is
the id of the agency, and the ``defaultdicts`` are nested
to reflect their parent/child relationships.
"""
if not agencies:
# The FR Notice XML doesn't tend to have all the metadata we need
# contained within it, so don't try to parse that, just log an
# error.
logger.warning("Preprocessing notice: no agency metadata.")
return {}
        # We need to turn the references to parent_ids into a tree of dicts
# that contain subagencies in children fields:
for agency in agencies:
agency["children"] = []
agency_map = {agency["id"]: agency for agency in agencies}
child_keys = []
for key in agency_map:
agency = agency_map[key]
if agency.get("parent_id") and agency["parent_id"] in agency_map:
agency_map[agency["parent_id"]]["children"].append(agency)
child_keys.append(key)
for key in child_keys:
del agency_map[key]
# Add the elements, starting with a parent ``EREGS_AGENCIES`` element.
agencies_el = etree.Element("EREGS_AGENCIES")
for agency_id in agency_map:
agency = agency_map[agency_id]
has_parent = agency.get("parent_id")
tag = "EREGS_SUBAGENCY" if has_parent else "EREGS_AGENCY"
agency_el = etree.Element(tag, **{
"name": str(agency["name"]),
"raw-name": str(agency["raw_name"]),
"agency-id": str(agency["id"])
})
add_children(agency_el, agency.get("children", []))
agencies_el.append(agency_el)
self.xml.insert(0, agencies_el)
return agency_map
def derive_cfr_refs(self):
"""Pull out CFR information from the CFR tag"""
for cfr_elm in self.xpath('//CFR'):
result = notice_cfr_p.parseString(cfr_elm.text)
yield TitlePartsRef(result.cfr_title, list(result.cfr_parts))
def _derive_date_type(self, date_type):
"""Attempt to parse comment closing date from DATES tags. Returns a
datetime.date and sets the corresponding field"""
dates = fetch_dates(self.xml) or {}
if date_type in dates:
comments = datetime.strptime(
dates[date_type][0], "%Y-%m-%d").date()
return comments
def derive_closing_date(self):
return self._derive_date_type('comments')
def derive_effective_date(self):
return self._derive_date_type('effective')
def _get_date_attr(self, date_type):
"""Pulls out the date set in `set_date_attr`, as a datetime.date. If
not present, returns None"""
value = self.xpath(".//DATES")[0].get('eregs-{0}-date'.format(
date_type))
if value:
return datetime.strptime(value, "%Y-%m-%d").date()
def derive_where_needed(self):
"""A handful of fields might be parse-able from the original XML. If
we don't have values through modification, derive them here"""
if not self.comments_close_on:
self.comments_close_on = self.derive_closing_date()
if not self.rins:
self.rins = self.derive_rins()
if not self.cfr_refs:
self.cfr_refs = self.derive_cfr_refs()
if not self.effective:
self.effective = self.derive_effective_date()
if not self.docket_ids:
self.docket_ids = self.derive_docket_ids()
supporting = self.supporting_documents
needs_supporting = not supporting
for docket_id in self.docket_ids:
proposal = regs_gov.proposal(docket_id, self.version_id)
if proposal and not self.comment_doc_id:
self.comment_doc_id = proposal.regs_id
if proposal and not self.primary_docket:
self.primary_docket = docket_id
if needs_supporting:
supporting.extend(regs_gov.supporting_docs(docket_id))
self.supporting_documents = supporting
# --- Setters/Getters for specific fields. ---
# We encode relevant information within the XML, but wish to provide easy
# access
@property
def rins(self):
return [_.attrib['rin'] for _ in self.xpath("//EREGS_RIN")]
@rins.setter
def rins(self, value):
"""
Modify the XML tree so that it contains meta data for regulation id
numbers.
The Federal Register API implies that documents can have more than one.
The XML we're adding will look something like this::
<EREGS_RINS>
<EREGS_RIN rin="2050-AG65" />
</EREGS_RINS>
:arg list value: RINs, which should be strings.
"""
rins_el = self._find_or_create('EREGS_RINS')
for rin in value:
etree.SubElement(rins_el, "EREGS_RIN", rin=rin)
@property
def docket_ids(self):
return [_.attrib['docket_id'] for _ in self.xpath("//EREGS_DOCKET_ID")]
@docket_ids.setter
def docket_ids(self, value):
"""
Modify the XML tree so that it contains meta data for docket ids.
The XML we're adding will look something like this::
<EREGS_DOCKET_IDS>
<EREGS_DOCKET_ID docket_id="EPA-HQ-SFUND-2010-1086" />
<EREGS_DOCKET_ID docket_id="FRL-9925-69-OLEM" />
</EREGS_DOCKET_IDS>
:arg list value: docket_ids, which should be strings.
"""
dids_el = self._find_or_create('EREGS_DOCKET_IDS')
for docket_id in value:
etree.SubElement(dids_el, "EREGS_DOCKET_ID", docket_id=docket_id)
@property
def cfr_refs(self):
refs = []
for title_el in self.xpath("//EREGS_CFR_TITLE_REF"):
parts = title_el.xpath("EREGS_CFR_PART_REF")
parts = [int(p.attrib["part"]) for p in parts]
refs.append(TitlePartsRef(title=int(title_el.attrib["title"]),
parts=parts))
return refs
@cfr_refs.setter
def cfr_refs(self, value):
"""
Transform the XML to include elements that look like this::
<EREGS_CFR_REFS>
<EREGS_CFR_TITLE_REF title="40">
<EREGS_CFR_PART_REF part="300" />
<EREGS_CFR_PART_REF part="310" />
</EREGS_CFR_TITLE_REF>
</EREGS_CFR_REFS>
:arg list value: List of TitlePartsRef elements
"""
refs_el = etree.Element("EREGS_CFR_REFS")
for ref in value:
el = etree.SubElement(refs_el, "EREGS_CFR_TITLE_REF",
title=str(ref.title))
for part in ref.parts:
etree.SubElement(el, "EREGS_CFR_PART_REF", part=str(part))
self.xml.insert(0, refs_el)
@property
def cfr_ref_pairs(self):
return [(ref.title, part)
for ref in self.cfr_refs for part in ref.parts]
@property
def comments_close_on(self):
return self._get_date_attr('comments-close-on')
@comments_close_on.setter
def comments_close_on(self, value):
self._set_date_attr('comments-close-on', value)
@property
def effective(self):
return self._get_date_attr('effective')
@effective.setter
def effective(self, value):
self._set_date_attr('effective', value)
@property
def published(self):
return self._get_date_attr('published')
@published.setter
def published(self, value):
self._set_date_attr('published', value)
@cached_property # rather expensive operation, so cache results
def amendments(self):
"""Getter for relevent amendments.
:rtype: list of amendments
"""
try:
amendments = fetch_amendments(self.xml)
except: # noqa
logger.error('Unable to fetch amendments for %s', self.version_id)
return []
return amendments
@property
def fr_citation(self):
return Citation(self.fr_volume, self.start_page)
@property
def title(self):
return self.xpath('//SUBJECT')[0].text
@property
def primary_agency(self):
return self.xpath('//AGENCY')[0].text
@property
def supporting_documents(self):
""":rtype: list of regs_gov.RegsGovDoc"""
return [regs_gov.RegsGovDoc(**s.attrib)
for s in self.xpath('//EREGS_SUPPORTING_DOC')]
@supporting_documents.setter
def supporting_documents(self, value):
"""A docket consists of multiple, related documents. The most
important is generally the proposal and/or final rule, but there are
often supporting documents we need to link to.
Modify the XML to look like::
<EREGS_SUPPORTING_DOCS>
<EREGS_SUPPORTING_DOC
regs_id="EPA-HQ-SFUND-2010-1086-0001"
title="Title goes here" />
<EREGS_SUPPORTING_DOC
regs_id="EPA-HQ-SFUND-2010-1086-0002"
title="Title goes here" />
</EREGS_SUPPORTING_DOCS>
:arg list value: list of regs_gov.RegsGovDocs
"""
container = self._find_or_create('EREGS_SUPPORTING_DOCS')
for doc in value:
etree.SubElement(container, 'EREGS_SUPPORTING_DOC',
**doc._asdict())
version_id = _root_property('eregs-version-id')
fr_html_url = _root_property('fr-html-url')
comment_doc_id = _root_property('eregs-comment-doc-id')
primary_docket = _root_property('eregs-primary-docket')
fr_volume = _root_property('fr-volume', int)
start_page = _root_property('fr-start-page', int)
end_page = _root_property('fr-end-page', int)
def as_dict(self):
"""We use JSON to represent notices in the API. This converts the
relevant data into a dictionary to get one step closer. Unfortunately,
that design assumes a single cfr_part"""
cfr_ref = self.cfr_refs[0]
notice = {'amendments': self.amendments,
'cfr_parts': [str(part) for part in cfr_ref.parts],
'cfr_title': cfr_ref.title,
'dockets': self.docket_ids,
'document_number': self.version_id,
'fr_citation': self.fr_citation.formatted(),
'fr_url': self.fr_html_url,
'fr_volume': self.fr_volume,
# @todo - SxS depends on this; we should remove soon
'meta': {'start_page': self.start_page},
'primary_agency': self.primary_agency,
'primary_docket': self.primary_docket,
'publication_date': self.published.isoformat(),
'regulation_id_numbers': self.rins,
'supporting_documents': [
d._asdict() for d in self.supporting_documents],
'title': self.title}
if self.comments_close_on:
notice['comments_close'] = self.comments_close_on.isoformat()
if self.effective:
notice['effective_on'] = self.effective.isoformat()
if self.comment_doc_id:
notice['comment_doc_id'] = self.comment_doc_id
return notice
def local_copies(url):
"""Use any local copies (potentially with modifications of the FR XML)"""
parsed_url = urlparse(url)
path = parsed_url.path.replace('/', os.sep)
notice_dir_suffix, file_name = os.path.split(path)
for xml_path in settings.LOCAL_XML_PATHS:
if os.path.isfile(xml_path + path):
return [xml_path + path]
else:
prefix = file_name.split('.')[0]
notice_directory = xml_path + notice_dir_suffix
notices = []
if os.path.exists(notice_directory):
notices = os.listdir(notice_directory)
relevant_notices = [os.path.join(notice_directory, n)
for n in notices if n.startswith(prefix)]
if relevant_notices:
return relevant_notices
return []
def notice_xmls_for_url(notice_url):
"""Find, preprocess, and return the XML(s) associated with a particular FR
notice url"""
local_notices = local_copies(notice_url)
if local_notices:
logger.info("using local xml for %s", notice_url)
for local_notice_file in local_notices:
with open(local_notice_file, 'rb') as f:
yield NoticeXML(f.read(), local_notice_file).preprocess()
else:
# ignore initial slash
path_parts = urlparse(notice_url).path[1:].split('/')
client = http_client()
first_try_url = settings.XML_REPO_PREFIX + '/'.join(path_parts)
logger.info('trying to fetch notice xml from %s', first_try_url)
response = client.get(first_try_url)
if response.status_code != requests.codes.ok:
logger.info('failed. fetching from %s', notice_url)
response = client.get(notice_url)
yield NoticeXML(response.content, notice_url).preprocess()
def xmls_for_url(notice_url):
# @todo: remove the need for this function
return [notice_xml.xml for notice_xml in notice_xmls_for_url(notice_url)]
|
cc0-1.0
|
36c2685616672a99be5dbecdb98d39b2
| 36.958071
| 79
| 0.589473
| 3.764241
| false
| false
| false
| false
|
eregs/regulations-parser
|
tests/commands_fetch_sxs_tests.py
|
3
|
2201
|
# @todo - right now this is a copy-paste from parse_rule_changes. SxS and
# rule changes will develop different data structures, however, so these files
# will diverge soon
from unittest import TestCase
import pytest
from click.testing import CliRunner
from lxml import etree
from mock import patch
from regparser.commands.fetch_sxs import fetch_sxs
from regparser.index import dependency, entry
from regparser.notice.xml import NoticeXML
from regparser.test_utils.xml_builder import XMLBuilder
@pytest.mark.django_db
class CommandsFetchSxSTests(TestCase):
def setUp(self):
super(CommandsFetchSxSTests, self).setUp()
self.cli = CliRunner()
with XMLBuilder("ROOT") as ctx:
ctx.PRTPAGE(P="1234")
with ctx.EREGS_CFR_REFS():
with ctx.EREGS_CFR_TITLE_REF(title="12"):
ctx.EREGS_CFR_PART_REF(part="1000")
self.notice_xml = NoticeXML(ctx.xml)
def test_missing_notice(self):
"""If the necessary notice XML is not present, we should expect a
dependency error"""
with self.cli.isolated_filesystem():
result = self.cli.invoke(fetch_sxs, ['1111'])
self.assertTrue(isinstance(result.exception, dependency.Missing))
@patch('regparser.commands.fetch_sxs.build_notice')
@patch('regparser.commands.fetch_sxs.meta_data')
def test_writes(self, meta_data, build_notice):
"""If the notice XML is present, we write the parsed version to disk,
even if that version's already present"""
with self.cli.isolated_filesystem():
entry.Notice('1111').write(self.notice_xml)
self.cli.invoke(fetch_sxs, ['1111'])
meta_data.return_value = {'example': 1}
self.assertTrue(build_notice.called)
args, kwargs = build_notice.call_args
self.assertTrue(args[2], {'example': 1})
self.assertTrue(
isinstance(kwargs['xml_to_process'], etree._Element))
build_notice.reset_mock()
entry.Entry('rule_changes', '1111').write(b'content')
self.cli.invoke(fetch_sxs, ['1111'])
self.assertTrue(build_notice.called)
|
cc0-1.0
|
71f08f8b39d107d47f3df0ea33537d0f
| 39.759259
| 78
| 0.652431
| 3.788296
| false
| true
| false
| false
|
eregs/regulations-parser
|
regparser/tree/depth/optional_rules.py
|
3
|
4861
|
"""Depth derivation has a mechanism for _optional_ rules. This module contains
a collection of such rules. All functions should accept two parameters; the
latter is a list of all variables in the system; the former is a function
which can be used to constrain the variables. This allows us to define rules
over subsets of the variables rather than all of them, should that make our
constraints more useful"""
from constraint import InSetConstraint
from regparser.tree.depth import markers
from regparser.tree.depth.rules import _level_and_children, ancestors
def depth_type_inverses(constrain, all_variables):
"""If paragraphs are at the same depth, they must share the same type. If
paragraphs are the same type, they must share the same depth"""
def inner(typ, idx, depth, *all_prev):
if typ == markers.stars or typ == markers.markerless:
return True
for i in range(0, len(all_prev), 3):
prev_typ, prev_idx, prev_depth = all_prev[i:i + 3]
if prev_depth == depth and prev_typ not in (markers.stars, typ,
markers.markerless):
return False
if prev_typ == typ and prev_depth != depth:
return False
return True
for i in range(0, len(all_variables), 3):
constrain(inner, all_variables[i:i + 3] + all_variables[:i])
def star_new_level(constrain, all_variables):
"""STARS should never have subparagraphs as it'd be impossible to
determine where in the hierarchy these subparagraphs belong.
@todo: This _probably_ should be a general rule, but there's a test that
this breaks in the interpretations. Revisit with CFPB regs"""
def inner(prev_typ, prev_depth, typ, depth):
return not (prev_typ == markers.stars and depth == prev_depth + 1)
for i in range(3, len(all_variables), 3):
prev_typ, prev_depth = all_variables[i - 3], all_variables[i - 1]
typ, depth = all_variables[i], all_variables[i + 2]
constrain(inner, [prev_typ, prev_depth, typ, depth])
def stars_occupy_space(constrain, all_variables):
"""Star markers can't be ignored in sequence, so 1, *, 2 doesn't make
sense for a single level, unless it's an inline star. In the inline
case, we can think of it as 1, intro-text-to-1, 2"""
def per_level(elements):
level, grouped_children = _level_and_children(elements)
if not level:
return True # Base Case
last_idx, last_typ = -1, None
for typ, idx, _ in level:
if typ == markers.stars:
if idx == 0: # STARS_TAG, not INLINE_STARS
last_idx += 1
# sequences must be increasing. Exception for markerless
elif (last_idx >= idx and
markers.markerless not in (last_typ, typ)):
return False
else:
last_idx = idx
last_typ = typ
for children in grouped_children: # Recurse
if not per_level(children):
return False
return True
def inner(*all_vars):
elements = [tuple(all_vars[i:i + 3])
for i in range(0, len(all_vars), 3)]
return per_level(elements)
constrain(inner, all_variables)
def limit_paragraph_types(*p_types):
"""Constraint paragraphs to a limited set of paragraph types. This can
reduce the search space if we know (for example) that the text comes from
regulations and hence does not have capitalized roman numerals"""
def constrainer(constrain, all_variables):
types = [all_variables[i] for i in range(0, len(all_variables), 3)]
constrain(InSetConstraint(p_types), types)
return constrainer
def limit_sequence_gap(size=0):
"""We've loosened the rules around sequences of paragraphs so that
paragraphs can be skipped. This allows arbitrary tightening of that rule,
effectively allowing gaps of a limited size"""
gap_size = size + 1 # we'll always want the difference to be >= 1
def inner(typ, idx, depth, *all_prev):
ancestor_markers = ancestors(all_prev)
# Continuing a sequence or becoming more shallow
if depth < len(ancestor_markers):
# Find the previous marker at this depth
prev_typ, prev_idx, prev_depth = ancestor_markers[depth]
types = {prev_typ, typ}
special_types = {markers.stars, markers.markerless}
if not special_types & types and prev_typ == typ:
return idx > prev_idx and idx - prev_idx <= gap_size
return True
def constrainer(constrain, all_variables):
for i in range(0, len(all_variables), 3):
constrain(inner, all_variables[i:i + 3] + all_variables[:i])
return constrainer
|
cc0-1.0
|
5e72b0759e23676cae530cb5ae2a5292
| 41.269565
| 78
| 0.630323
| 3.961695
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/web/management/commands/eregs.py
|
2
|
1246
|
import logging
import os
import sys
import click
import coloredlogs
import ipdb
from django.core import management
from django.db import connections
from django.db.migrations.loader import MigrationLoader
from djclick.adapter import BaseRegistrator, DjangoCommandMixin
from regparser.commands.retry import RetryingCommand
DEFAULT_LOG_FORMAT = "%(asctime)s %(name)-40s %(message)s"
class DjangoCommandRegistrator(BaseRegistrator):
"""Class which registers a command with Django. Uses the base classes
provided by djclick"""
cls = type('RetryDjangoCommand', (DjangoCommandMixin, RetryingCommand), {})
@DjangoCommandRegistrator()
@click.option('--debug/--no-debug', default=False)
def cli(debug):
log_level = logging.INFO
if debug:
log_level = logging.DEBUG
sys.excepthook = lambda t, v, tb: ipdb.post_mortem(tb)
coloredlogs.install(
level=log_level,
fmt=os.getenv("COLOREDLOGS_LOG_FORMAT", DEFAULT_LOG_FORMAT))
connection = connections['default']
loader = MigrationLoader(connection, ignore_no_migrations=True)
all_migrations = set(loader.disk_migrations.keys())
if all_migrations != loader.applied_migrations:
management.call_command('migrate', noinput=True)
|
cc0-1.0
|
e3f66131fa0c7cc7ed252a0a6ceb999c
| 30.948718
| 79
| 0.741573
| 3.857585
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/tree/gpo_cfr/section.py
|
3
|
9764
|
# -*- coding: utf-8 -*-
import re
import pyparsing
import six
from regparser.citations import remove_citation_overlaps
from regparser.grammar import unified
from regparser.grammar.utils import QuickSearchable
from regparser.tree.depth import markers as mtypes
from regparser.tree.depth import optional_rules
from regparser.tree.paragraph import p_level_of, p_levels
from regparser.tree.reg_text import build_empty_part
from regparser.tree.struct import Node
from regparser.tree.xml_parser import (flatsubtree_processor, import_category,
matchers, note_processor,
paragraph_processor, tree_utils)
def _deeper_level(first, second):
"""Is the second marker deeper than the first"""
for level1 in p_level_of(first):
for level2 in p_level_of(second):
if level1 < level2:
return True
return False
def _continues_collapsed(first, second):
"""Does the second marker continue a sequence started by the first?"""
if second == mtypes.STARS_TAG: # Missing data - proceed optimistically
return True
for level1, markers1 in enumerate(p_levels):
for level2, markers2 in enumerate(p_levels):
if first not in markers1 or second not in markers2:
continue
idx1, idx2 = markers1.index(first), markers2.index(second)
extending = level1 == level2 and idx2 == idx1 + 1
new_level = level2 == level1 + 1 and idx2 == 0
if extending or new_level:
return True
return False
def get_markers(text, following_marker=None):
""" Extract all the paragraph markers from text. Do some checks on the
collapsed markers."""
initial = initial_markers(text)
if following_marker is None:
collapsed = []
else:
collapsed = collapsed_markers(text)
# Check that the collapsed markers make sense:
# * at least one level below the initial marker
# * followed by a marker in sequence
if initial and collapsed:
collapsed = [c for c in collapsed if _deeper_level(initial[-1], c)]
for marker in reversed(collapsed):
if _continues_collapsed(marker, following_marker):
break
else:
collapsed.pop()
return initial + collapsed
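# Rough sketch of the intent (sample strings invented): for a paragraph such
# as u'(a) Content—(1) more content' whose following paragraph starts with
# marker u'2', the initial marker 'a' and the collapsed marker '1' are both
# kept, i.e. roughly [u'a', u'1'].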
def _any_depth_parse(match):
"""Convert any_depth_p match into the appropriate marker strings"""
markers = [match.p1, match.p2, match.p3, match.p4, match.p5, match.p6]
for idx in (4, 5):
if markers[idx]:
markers[idx] = mtypes.emphasize(markers[idx])
return [m for m in markers if m]
any_depth_p = unified.any_depth_p.copy().setParseAction(_any_depth_parse)
def initial_markers(text):
"""Pull out a list of the first paragraph markers, i.e. markers before any
text"""
try:
return list(any_depth_p.parseString(text))
except pyparsing.ParseException:
return []
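# For instance (sample text invented), initial_markers(u'(k)(2)(iii) abc')
# should yield [u'k', u'2', u'iii'], while text with no leading marker yields
# an empty list.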
_collapsed_grammar = QuickSearchable(
# A guard to reduce false positives
pyparsing.Suppress(pyparsing.Regex(u',|\\.|-|—|>|means ')) +
any_depth_p)
def collapsed_markers(text):
"""Not all paragraph markers are at the beginning of of the text. This
grabs inner markers like (1) and (i) here:
(c) cContent —(1) 1Content (i) iContent"""
potential = [triplet for triplet in _collapsed_grammar.scanString(text)]
# remove any that overlap with citations
potential = [trip for trip in remove_citation_overlaps(text, potential)]
# flatten the results
potential = [pm for pms, _, _ in potential for pm in pms]
# remove any matches that aren't (a), (1), (i), etc. -- All other
# markers can't be collapsed
first_markers = [level[0] for level in p_levels]
potential = [pm for pm in potential if pm in first_markers]
return potential
def build_from_section(reg_part, section_xml):
section_no = section_xml.xpath('SECTNO')[0].text
subject_xml = section_xml.xpath('SUBJECT')
if not subject_xml:
subject_xml = section_xml.xpath('RESERVED')
subject_text = (subject_xml[0].text or '').strip()
section_nums = []
for match in re.finditer(r'{0}\.(\d+[a-z]*)'.format(reg_part), section_no):
secnum_candidate = match.group(1)
if secnum_candidate.isdigit():
secnum_candidate = int(secnum_candidate)
section_nums.append(secnum_candidate)
# Merge spans longer than 3 sections
section_span_end = None
if (len(section_nums) == 2 and section_no[:2] == u'§§'
and '-' in section_no):
first, last = section_nums
if last - first + 1 > 3:
section_span_end = str(last)
section_nums = [first]
else:
section_nums = []
for i in range(first, last + 1):
section_nums.append(i)
section_nodes = []
for section_number in section_nums:
section_number = str(section_number)
section_text = (section_xml.text or '').strip()
tagged_section_text = section_xml.text
if section_span_end:
section_title = u"§§ {0}.{1}-{2}".format(
reg_part, section_number, section_span_end)
else:
section_title = u"§ {0}.{1}".format(reg_part, section_number)
if subject_text:
section_title += " " + subject_text
sect_node = Node(
section_text, label=[reg_part, section_number],
title=section_title, tagged_text=tagged_section_text
)
section_nodes.append(
RegtextParagraphProcessor().process(section_xml, sect_node)
)
return section_nodes
def next_marker(xml):
"""Find the first marker in a paragraph that follows this xml node.
May return None"""
good_tags = ('P', 'FP', mtypes.STARS_TAG)
node = xml.getnext()
while node is not None and node.tag not in good_tags:
node = node.getnext()
if getattr(node, 'tag', None) == mtypes.STARS_TAG:
return mtypes.STARS_TAG
elif node is not None:
tagged_text = tree_utils.get_node_text_tags_preserved(node)
markers = get_markers(tagged_text.strip())
if markers:
return markers[0]
def split_by_markers(xml):
"""Given an xml node, pull out triplets of
(marker, plain-text following, text-with-tags following)
for each subparagraph found"""
plain_text = tree_utils.get_node_text(xml, add_spaces=True).strip()
tagged_text = tree_utils.get_node_text_tags_preserved(xml).strip()
markers_list = get_markers(tagged_text, next_marker(xml))
plain_markers = ['({0})'.format(mtypes.deemphasize(m))
for m in markers_list]
node_texts = tree_utils.split_text(plain_text, plain_markers)
tagged_texts = tree_utils.split_text(
tagged_text, ['({0})'.format(m) for m in markers_list])
if len(node_texts) > len(markers_list): # due to initial MARKERLESS
markers_list.insert(0, mtypes.MARKERLESS)
return list(zip(markers_list, node_texts, tagged_texts))
class ParagraphMatcher(paragraph_processor.BaseMatcher):
"""<P>/<FP> with or without initial paragraph markers -- (a)(1)(i) etc."""
def matches(self, xml):
return xml.tag in ('P', 'FP')
def derive_nodes(self, xml, processor=None):
nodes = []
plain_text = ''
for marker, plain_text, tagged_text in split_by_markers(xml):
nodes.append(Node(
text=plain_text.strip(), label=[marker], source_xml=xml,
tagged_text=six.text_type(tagged_text.strip())
))
if plain_text.endswith('* * *'): # last in loop
nodes.append(Node(label=[mtypes.INLINE_STARS]))
return nodes
class RegtextParagraphProcessor(paragraph_processor.ParagraphProcessor):
MATCHERS = [paragraph_processor.StarsMatcher(),
paragraph_processor.TableMatcher(),
paragraph_processor.FencedMatcher(),
flatsubtree_processor.FlatsubtreeMatcher(
tags=['EXTRACT'], node_type=Node.EXTRACT),
import_category.ImportCategoryMatcher(),
flatsubtree_processor.FlatsubtreeMatcher(tags=['EXAMPLE']),
paragraph_processor.HeaderMatcher(),
paragraph_processor.GraphicsMatcher(),
ParagraphMatcher(),
note_processor.NoteMatcher(),
paragraph_processor.IgnoreTagMatcher(
'SECTNO', 'SUBJECT', 'CITA', 'SECAUTH', 'APPRO',
'PRTPAGE', 'EAR', 'RESERVED')]
def additional_constraints(self):
return [
optional_rules.depth_type_inverses,
optional_rules.limit_sequence_gap(3),
optional_rules.stars_occupy_space,
] + self.relaxed_constraints()
def relaxed_constraints(self):
return [optional_rules.star_new_level,
optional_rules.limit_paragraph_types(
mtypes.lower, mtypes.upper,
mtypes.ints, mtypes.roman,
mtypes.em_ints, mtypes.em_roman,
mtypes.stars, mtypes.markerless)]
class ParseEmptyPart(matchers.Parser):
"""Create an EmptyPart (a subpart with no name) if we encounter a SECTION
at the top level"""
def matches(self, parent, xml_node):
return xml_node.tag == 'SECTION' and len(parent.label) == 1
def __call__(self, parent, xml_node):
sections = build_from_section(parent.cfr_part, xml_node)
if not parent.children:
parent.children.append(build_empty_part(parent.cfr_part))
parent.children[-1].children.extend(sections)
|
cc0-1.0
|
fb7a4fd38fa4dedbb94d46e2ca25ff54
| 36.375479
| 79
| 0.61999
| 3.845093
| false
| false
| false
| false
|
eregs/regulations-parser
|
regparser/commands/layers.py
|
3
|
3662
|
import logging
import click
from stevedore.extension import ExtensionManager
from regparser.commands import utils
from regparser.index import dependency, entry
logger = logging.getLogger(__name__)
def _init_classes():
"""Avoid leaking state variables by wrapping `LAYER_CLASSES` construction
in a function"""
classes = {}
for doc_type in ('cfr', 'preamble'): # @todo - make this dynamic
namespace = 'eregs_ns.parser.layer.{0}'.format(doc_type)
classes[doc_type] = {
extension.name: extension.plugin
for extension in ExtensionManager(namespace)
}
# For backwards compatibility. @todo - remove in later release
old_namespace = 'eregs_ns.parser.layers'
classes['cfr'].update({
extension.plugin.shorthand: extension.plugin
for extension in ExtensionManager(old_namespace)
})
return classes
LAYER_CLASSES = _init_classes()
def stale_layers(doc_entry, doc_type):
"""Return the name of layer dependencies which are now stale. Limit to a
particular doc_type"""
deps = dependency.Graph()
layer_dir = entry.Layer(doc_type, *doc_entry.path)
for layer_name in LAYER_CLASSES[doc_type]:
# Layers depend on their associated tree
deps.add(layer_dir / layer_name, doc_entry)
if doc_type == 'cfr':
# Meta layer also depends on the version info
deps.add(layer_dir / 'meta', entry.Version(*doc_entry.path))
stale = []
for layer_name in LAYER_CLASSES[doc_type]:
layer_entry = layer_dir / layer_name
deps.validate_for(layer_entry)
if deps.is_stale(layer_entry):
stale.append(layer_name)
return stale
def process_cfr_layers(stale_names, cfr_title, version_entry):
"""Build all of the stale layers for this version, writing them into the
index. Assumes all dependencies have already been checked"""
tree = entry.Tree(*version_entry.path).read()
version = version_entry.read()
layer_dir = entry.Layer.cfr(*version_entry.path)
for layer_name in stale_names:
layer_json = LAYER_CLASSES['cfr'][layer_name](
tree, cfr_title=int(cfr_title), version=version).build()
(layer_dir / layer_name).write(layer_json)
def process_preamble_layers(stale_names, preamble_entry):
"""Build all of the stale layers for this preamble, writing them into the
index. Assumes all dependencies have already been checked"""
tree = preamble_entry.read()
layer_dir = entry.Layer.preamble(*preamble_entry.path)
for layer_name in stale_names:
layer_json = LAYER_CLASSES['preamble'][layer_name](tree).build()
(layer_dir / layer_name).write(layer_json)
@click.command()
@click.option('--cfr_title', type=int, help="Limit to one CFR title")
@click.option('--cfr_part', type=int, help="Limit to one CFR part")
# @todo - allow layers to be passed as a parameter
def layers(cfr_title, cfr_part):
"""Build all layers for all known versions."""
logger.info("Build layers - %s CFR %s", cfr_title, cfr_part)
for tree_entry in utils.relevant_paths(entry.Tree(), cfr_title, cfr_part):
tree_title, tree_part, version_id = tree_entry.path
version_entry = entry.Version(tree_title, tree_part, version_id)
stale = stale_layers(tree_entry, 'cfr')
if stale:
process_cfr_layers(stale, tree_title, version_entry)
if cfr_title is None and cfr_part is None:
for preamble_entry in entry.Preamble().sub_entries():
stale = stale_layers(preamble_entry, 'preamble')
if stale:
process_preamble_layers(stale, preamble_entry)
|
cc0-1.0
|
e465679b239546288e2859e8c409a4c9
| 36.752577
| 78
| 0.666303
| 3.691532
| false
| false
| false
| false
|
noisyboiler/wampy
|
test/integration/roles/test_callers.py
|
1
|
3838
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
from datetime import date
import pytest
from wampy.backends import get_async_adapter
from wampy.errors import WampyTimeOutError
from wampy.peers.clients import Client
from wampy.roles.callee import callee
from wampy.testing import wait_for_registrations
class DateService(Client):
@callee
def get_todays_date(self):
return datetime.date.today().isoformat()
class HelloService(Client):
@callee
def say_hello(self, name):
message = "Hello {}".format(name)
return message
@callee
def say_greeting(self, name, greeting="hola"):
message = "{greeting} to {name}".format(
greeting=greeting, name=name)
return message
class BinaryNumberService(Client):
@callee
def get_binary(self, integer):
result = bin(integer)
return result
class ReallySlowService(Client):
@callee
def requires_patience(self, wait_in_seconds):
async_ = get_async_adapter()
async_.sleep(wait_in_seconds)
reward_for_waiting = "$$$$"
return reward_for_waiting
@pytest.fixture
def date_service(router):
with DateService(url=router.url) as serv:
wait_for_registrations(serv, 1)
yield
@pytest.fixture
def hello_service(router):
with HelloService(url=router.url):
yield
@pytest.fixture
def binary_number_service(router):
with BinaryNumberService(url=router.url):
yield
@pytest.fixture
def really_slow_service(router):
with ReallySlowService(url=router.url):
yield
class TestClientCall:
def test_call_with_no_args_or_kwargs(self, date_service, router):
client = Client(url=router.url)
with client:
response = client.call("get_todays_date")
today = date.today()
assert response == today.isoformat()
def test_call_with_args_but_no_kwargs(self, hello_service, router):
caller = Client(url=router.url)
with caller:
response = caller.call("say_hello", "Simon")
assert response == "Hello Simon"
def test_call_with_args_and_kwargs(self, hello_service, router):
caller = Client(url=router.url)
with caller:
response = caller.call("say_greeting", "Simon", greeting="watcha")
assert response == "watcha to Simon"
class TestClientRpc:
def test_rpc_with_no_args_but_a_default_kwarg(self, hello_service, router):
caller = Client(url=router.url)
with caller:
response = caller.rpc.say_greeting("Simon")
assert response == "hola to Simon"
def test_rpc_with_args_but_no_kwargs(self, hello_service, router):
caller = Client(url=router.url)
with caller:
response = caller.rpc.say_hello("Simon")
assert response == "Hello Simon"
def test_rpc_with_no_args_but_a_kwarg(self, hello_service, router):
caller = Client(url=router.url)
with caller:
response = caller.rpc.say_greeting("Simon", greeting="goodbye")
assert response == "goodbye to Simon"
class TestCallerTimeout:
@pytest.mark.parametrize("call_timeout, wait, reward", [
(1, 2, None),
(2, 1, "$$$$"),
(0.9, 1.1, None),
(1, 3, None),
])
def test_timeout_values(
self, call_timeout, wait, reward, router, really_slow_service,
):
with Client(url=router.url, call_timeout=call_timeout) as client:
try:
resp = client.rpc.requires_patience(wait_in_seconds=wait)
except WampyTimeOutError:
resp = None
assert resp == reward
|
mpl-2.0
|
a656f066568fe204271d59a1b7e5abf8
| 25.108844
| 79
| 0.637832
| 3.641366
| false
| true
| false
| false
|
noisyboiler/wampy
|
wampy/messages/register.py
|
1
|
1217
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import random
class Register(object):
""" A Callee announces the availability of an endpoint implementing
a procedure with a Dealer by sending a "REGISTER" message.
Message is of the format
``[REGISTER, Request|id, Options|dict, Procedure|uri]``, e.g. ::
[
REGISTER, 25349185, {}, "com.myapp.myprocedure1"
]
"Request" is a random, ephemeral ID chosen by the Callee and
used to correlate the Dealer's response with the request.
"Options" is a dictionary that allows to provide additional
    registration request details in an extensible way.
"""
WAMP_CODE = 64
name = "register"
def __init__(self, procedure, options=None):
super(Register, self).__init__()
self.procedure = procedure
self.options = options or {}
self.request_id = random.getrandbits(32)
@property
def message(self):
return [
self.WAMP_CODE, self.request_id, self.options,
self.procedure
]
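# Minimal usage sketch (the procedure URI below is illustrative):
#
#     register = Register(procedure="com.example.get_weather")
#     register.message
#     # -> [64, <random request id>, {}, "com.example.get_weather"]
#
# request_id is random per instance, so the second element differs each time.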
|
mpl-2.0
|
82d22891734a4faa80ea8441f555ebd9
| 28.682927
| 71
| 0.645029
| 3.990164
| false
| false
| false
| false
|