| repo_name (string, len 7–65) | path (string, len 5–186) | copies (string, len 1–4) | size (string, len 4–6) | content (string, len 941–973k) | license (string, 14 classes) | hash (string, len 32) | line_mean (float64, 5–100) | line_max (int64, 26–999) | alpha_frac (float64, 0.25–0.93) | ratio (float64, 1.5–7.35) | autogenerated (bool, 1 class) | config_or_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | has_few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mozilla/normandy
|
normandy/recipes/tests/api/v3/test_shield_identicon.py
|
1
|
1396
|
import pytest
from normandy.recipes.api.v3.shield_identicon import Genome
@pytest.fixture
def genome():
seed = 123
return Genome(seed)
class TestGenome(object):
"""
Tests the Genome module by setting the seed to a known value and making sure that the
random choices remain consistent, i.e. they do not change over time.
"""
def test_weighted_choice(self, genome):
weighted_options = [
{"weight": 1, "value": "apple"},
{"weight": 2, "value": "orange"},
{"weight": 4, "value": "strawberry"},
]
weighted_choice_values = [
genome.weighted_choice(weighted_options),
genome.weighted_choice(weighted_options),
genome.weighted_choice(weighted_options),
]
assert weighted_choice_values == [
{"weight": 1, "value": "apple"},
{"weight": 2, "value": "orange"},
{"weight": 1, "value": "apple"},
]
def test_emoji(self, genome):
emoji_values = [genome.emoji(), genome.emoji(), genome.emoji()]
assert emoji_values == ["๐", "๐ฏ", "๐"]
def test_color(self, genome):
color_values = [
genome.color().rgb_color,
genome.color().rgb_color,
genome.color().rgb_color,
]
assert color_values == [(7, 54, 66), (255, 207, 0), (88, 110, 117)]
|
mpl-2.0
|
67377af2923b76d7e308e3982934b69f
| 29.822222
| 89
| 0.553713
| 3.769022
| false
| true
| false
| false
|
mozilla/normandy
|
normandy/recipes/tests/test_checks.py
|
1
|
4881
|
from datetime import timedelta
from django.core.exceptions import ImproperlyConfigured
from django.db.utils import ProgrammingError
import pytest
import requests.exceptions
from normandy.recipes import checks, signing
from normandy.recipes.tests import ActionFactory, RecipeFactory, SignatureFactory, UserFactory
@pytest.mark.django_db
class TestSignaturesUseGoodCertificates(object):
def test_it_works(self):
assert checks.signatures_use_good_certificates(None) == []
def test_it_fails_if_a_signature_does_not_verify(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = None
recipe = RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
mock_verify_x5u.side_effect = signing.BadCertificate("testing exception")
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, None)
assert len(errors) == 1
assert errors[0].id == checks.ERROR_BAD_SIGNING_CERTIFICATE
assert recipe.approved_revision.name in errors[0].msg
def test_it_ignores_signatures_without_x5u(self):
recipe = RecipeFactory(approver=UserFactory(), signed=True)
recipe.signature.x5u = None
recipe.signature.save()
actions = ActionFactory(signed=True)
actions.signature.x5u = None
actions.signature.save()
assert checks.signatures_use_good_certificates(None) == []
def test_it_ignores_signatures_not_in_use(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = None
recipe = RecipeFactory(approver=UserFactory(), signed=True)
SignatureFactory(x5u="https://example.com/bad_x5u") # unused signature
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
def side_effect(x5u, *args):
if "bad" in x5u:
raise signing.BadCertificate("testing exception")
return True
mock_verify_x5u.side_effect = side_effect
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, None)
assert errors == []
def test_it_passes_expire_early_setting(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = 7
recipe = RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, timedelta(7))
assert errors == []
def test_it_reports_x5u_network_errors(self, mocker):
RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
mock_verify_x5u.side_effect = requests.exceptions.ConnectionError
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once()
assert len(errors) == 1
assert errors[0].id == checks.ERROR_COULD_NOT_VERIFY_CERTIFICATE
@pytest.mark.django_db
class TestRecipeSignatureAreCorrect:
def test_it_warns_if_a_field_isnt_available(self, mocker):
"""This is to allow for un-applied to migrations to not break running migrations."""
RecipeFactory(approver=UserFactory(), signed=True)
mock_canonical_json = mocker.patch("normandy.recipes.models.Recipe.canonical_json")
mock_canonical_json.side_effect = ProgrammingError("error for testing")
errors = checks.recipe_signatures_are_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.WARNING_COULD_NOT_CHECK_SIGNATURES
@pytest.mark.django_db
class TestActionSignatureAreCorrect:
def test_it_warns_if_a_field_isnt_available(self, mocker):
"""This is to allow for un-applied to migrations to not break running migrations."""
ActionFactory(signed=True)
mock_canonical_json = mocker.patch("normandy.recipes.models.Action.canonical_json")
mock_canonical_json.side_effect = ProgrammingError("error for testing")
errors = checks.action_signatures_are_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.WARNING_COULD_NOT_CHECK_SIGNATURES
class TestRemoteSettingsConfigIsCorrect:
def test_it_warns_if_remote_settings_config_is_incorrect(self, mocker):
mock_check_config = mocker.patch("normandy.recipes.exports.RemoteSettings.check_config")
mock_check_config.side_effect = ImproperlyConfigured("error for testing")
errors = checks.remotesettings_config_is_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.ERROR_REMOTE_SETTINGS_INCORRECT_CONFIG
|
mpl-2.0
|
cf8ba8829b7a8bf7b06807e3c4f10a75
| 45.485714
| 96
| 0.708052
| 3.700531
| false
| true
| false
| false
|
mozilla/normandy
|
normandy/recipes/api/filters.py
|
1
|
4813
|
import django_filters
from rest_framework import serializers
from normandy.recipes.models import Recipe
class EnabledStateFilter(django_filters.Filter):
"""A special case filter for filtering recipes by their enabled state"""
def filter(self, qs, value):
if value is not None:
lc_value = value.lower()
if lc_value in ["true", "1"]:
return qs.only_enabled()
elif lc_value in ["false", "0"]:
return qs.only_disabled()
return qs
class ApprovalStateFilter(django_filters.Filter):
"""A special case filter for filtering approval requests by their approval state"""
def filter(self, qs, value):
if value is None:
return qs
lc_value = value.lower()
if lc_value in ["true", "1", "approved"]:
return qs.filter(approved=True)
elif lc_value in ["false", "0", "rejected"]:
return qs.filter(approved=False)
elif lc_value in ["null", "pending"]:
return qs.filter(approved=None)
# Unrecognized values fall through and leave the queryset unfiltered.
return qs
class BaselineCapabilitiesFilter(django_filters.Filter):
"""Filters recipe by whether they use only baseline capabilities, defaulting to only baseline."""
def __init__(self, *args, default_only_baseline=False, **kwargs):
super().__init__(*args, **kwargs)
self.default_only_baseline = default_only_baseline
def filter(self, qs, value):
baseline_only = self.default_only_baseline
if value is not None:
lc_value = value.lower()
baseline_only = lc_value in ["true", "1"]
if baseline_only:
recipes = list(qs)
if not all(isinstance(recipe, Recipe) for recipe in recipes):
raise TypeError("BaselineCapabilitiesFilter can only be used to filter recipes")
match_ids = []
for recipe in recipes:
if (
recipe.approved_revision
and recipe.approved_revision.uses_only_baseline_capabilities()
):
match_ids.append(recipe.id)
return Recipe.objects.filter(id__in=match_ids)
return qs
class CharSplitFilter(django_filters.CharFilter):
"""Custom CharFilter class that splits the value (if it's set) by `,` into a list
and uses the `__in` operator."""
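# Example (hypothetical): with field_name "channels", ?channels=beta,release
# becomes qs.filter(channels__in=["beta", "release"]).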
def filter(self, qs, value):
if value:
qs = qs.filter(**{"{}__in".format(self.field_name): value.split(",")})
return qs
class FilterObjectFieldFilter(django_filters.Filter):
"""
Find recipes that have a filter object with the given field
Format for the filter's value is `key1:value1,key2:value2`. This would
include recipes that have a filter object that has a field `key1` that
contains the value `value1`, and that have a filter object with a field
`key2` that contains `value2`. The two filter objects do not have to be
the same, but may be.
"""
def filter(self, qs, value):
if value is None:
return qs
needles = []
for segment in value.split(","):
if ":" not in segment:
raise serializers.ValidationError(
{"filter_object": "Filters must be of the format `key1:val1,key2:val2,..."}
)
key, val = segment.split(":", 1)
needles.append((key, val))
# Let the database do a first pass filter
for k, v in needles:
qs = qs.filter(latest_revision__filter_object_json__contains=k)
qs = qs.filter(latest_revision__filter_object_json__contains=v)
recipes = list(qs)
if not all(isinstance(recipe, Recipe) for recipe in recipes):
raise TypeError("FilterObjectFieldFilter can only be used to filter recipes")
# For every recipe that contains the right substrings, look through
# their filter objects for an actual match
match_ids = []
for recipe in recipes:
recipe_matches = True
# A recipe needs to have all the keys and values in the needles
for k, v in needles:
for filter_object in recipe.latest_revision.filter_object:
# Don't consider invalid filter objects
if not filter_object.is_valid():
continue
if k in filter_object.data and v in str(filter_object.data[k]):
# Found a match
break
else:
# Did not break, so no match was found
recipe_matches = False
break
if recipe_matches:
match_ids.append(recipe.id)
return Recipe.objects.filter(id__in=match_ids)
|
mpl-2.0
|
af41c1ff1cd7430c0dab073b7a6dec19
| 35.462121
| 101
| 0.584043
| 4.363554
| false
| false
| false
| false
|
mozilla/normandy
|
normandy/recipes/migrations/0008_auto_20180510_2252.py
|
1
|
1967
|
# Generated by Django 2.0.5 on 2018-05-10 22:52
# flake8: noqa
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("recipes", "0007_convert_simple_filters_to_filter_objects"),
]
operations = [
migrations.CreateModel(
name="EnabledState",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("created", models.DateTimeField(default=django.utils.timezone.now)),
("enabled", models.BooleanField(default=False)),
(
"creator",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="enabled_states",
to=settings.AUTH_USER_MODEL,
),
),
(
"revision",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="enabled_states",
to="recipes.RecipeRevision",
),
),
],
options={"ordering": ("-created",)},
),
migrations.AddField(
model_name="reciperevision",
name="enabled_state",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="current_for_revision",
to="recipes.EnabledState",
),
),
]
|
mpl-2.0
|
2401bc713daafd41216f75005b3da123
| 32.338983
| 95
| 0.475852
| 5.122396
| false
| false
| false
| false
|
mozilla/normandy
|
normandy/recipes/exports.py
|
1
|
8717
|
import logging
import kinto_http
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from normandy.base.utils import ScopedSettings
APPROVE_CHANGES_FLAG = {"status": "to-sign"}
ROLLBACK_CHANGES_FLAG = {"status": "to-rollback"}
logger = logging.getLogger(__name__)
rs_settings = ScopedSettings("REMOTE_SETTINGS_")
def recipe_as_record(recipe):
"""
Transform a recipe into a dict with the minimum set of fields needed for clients
to verify and execute recipes.
:param recipe: a recipe ready to be exported.
:returns: a dict to be posted on Remote Settings.
"""
from normandy.recipes.api.v1.serializers import (
MinimalRecipeSerializer,
SignatureSerializer,
) # avoid circular imports
record = {
"id": str(recipe.id),
"recipe": MinimalRecipeSerializer(recipe).data,
"signature": SignatureSerializer(recipe.signature).data,
}
return record
class RemoteSettings:
"""
Interacts with a RemoteSettings service.
Recipes get published as records in one or both of the dedicated
collections on Remote Settings. When disabled, those records are removed.
Since Normandy already has the required approval/signoff features, the integration
bypasses that of Remote Settings (leveraging a specific server configuration for this
particular collection).
There are two collections used. One is the "baseline" collection, which
is used only for recipes that fit within the baseline capabilities, and
are therefore compatible with a broad range of clients. The second is the
"capabilities" collection, in which all recipes are published. Clients
that read from the capabilities collection are expected to process
capabilities and only execute compatible recipes.
.. note::
Remote Settings signoff workflow relies on several buckets (see kinto-signer API).
The ``main-workspace`` is only readable and writable by authorized accounts.
The ``main`` bucket is read-only, but publicly readable. The Remote Settings
clients pull data from there.
Since the review step is disabled for Normandy, publishing data is done in three steps:
1. Create, update or delete records in the ``main-workspace`` bucket
2. Approve the changes by flipping the ``status`` field to ``to-sign``
in the collection metadata
3. The server will sign and publish the new data to the ``main`` bucket.
"""
def __init__(self):
# Kinto is the underlying implementation of Remote Settings. The client
# is basically a tiny abstraction on top of the requests library.
self.client = (
kinto_http.Client(
server_url=rs_settings.URL,
auth=(rs_settings.USERNAME, rs_settings.PASSWORD),
retry=rs_settings.RETRY_REQUESTS,
)
if rs_settings.URL
else None
)
def check_config(self):
"""
Verify that integration with Remote Settings is configured properly.
"""
if self.client is None:
return # no check if disabled.
required_keys = [
"CAPABILITIES_COLLECTION_ID",
"WORKSPACE_BUCKET_ID",
"PUBLISH_BUCKET_ID",
"USERNAME",
"PASSWORD",
]
for key in required_keys:
if not getattr(settings, f"REMOTE_SETTINGS_{key}"):
msg = f"set settings.REMOTE_SETTINGS_{key} to use Remote Settings integration"
raise ImproperlyConfigured(msg)
# Test authentication.
server_info = self.client.server_info()
is_authenticated = (
"user" in server_info and rs_settings.USERNAME in server_info["user"]["id"]
)
if not is_authenticated:
raise ImproperlyConfigured("Invalid Remote Settings credentials")
# Test that collection is writable.
bucket = rs_settings.WORKSPACE_BUCKET_ID
collection = rs_settings.CAPABILITIES_COLLECTION_ID
metadata = self.client.get_collection(id=collection, bucket=bucket)
if server_info["user"]["id"] not in metadata["permissions"].get("write", []):
raise ImproperlyConfigured(
f"Remote Settings collection {collection} is not writable in bucket {bucket}."
)
# Test that collection has the proper review settings.
capabilities = server_info["capabilities"]
if "signer" in capabilities:
signer_config = capabilities["signer"]
normandy_resource = [
r
for r in signer_config["resources"]
if r["source"]["bucket"] == bucket and r["source"]["collection"] == collection
]
review_disabled = len(normandy_resource) == 1 and not normandy_resource[0].get(
"to_review_enabled", signer_config["to_review_enabled"]
)
if not review_disabled:
raise ImproperlyConfigured(
f"Review was not disabled on Remote Settings collection {collection}."
)
def published_recipes(self):
"""
Return the current list of remote records.
"""
if self.client is None:
raise ImproperlyConfigured("Remote Settings is not enabled.")
capabilities_records = self.client.get_records(
bucket=rs_settings.PUBLISH_BUCKET_ID, collection=rs_settings.CAPABILITIES_COLLECTION_ID
)
return capabilities_records
def publish(self, recipe, approve_changes=True):
"""
Publish the specified `recipe` on the remote server by upserting a record.
"""
if self.client is None:
return # no-op if disabled.
# 1. Put the record.
record = recipe_as_record(recipe)
self.client.update_record(
data=record,
bucket=rs_settings.WORKSPACE_BUCKET_ID,
collection=rs_settings.CAPABILITIES_COLLECTION_ID,
)
# 2. Approve the changes immediately (multi-signoff is disabled).
log_action = "Batch published"
if approve_changes:
self.approve_changes()
log_action = "Published"
logger.info(
f"{log_action} record '{recipe.id}' for recipe {recipe.approved_revision.name!r}"
)
def unpublish(self, recipe, approve_changes=True):
"""
Unpublish the specified `recipe` by deleting its associated record on the remote server.
"""
if self.client is None:
return # no-op if disabled.
# 1. Delete the record
either_existed = False
try:
self.client.delete_record(
id=str(recipe.id),
bucket=rs_settings.WORKSPACE_BUCKET_ID,
collection=rs_settings.CAPABILITIES_COLLECTION_ID,
)
either_existed = True
except kinto_http.KintoException as e:
if e.response.status_code == 404:
logger.warning(
f"The recipe '{recipe.id}' was not published in the capabilities collection. Skip."
)
else:
raise
# 2. Approve the changes immediately (multi-signoff is disabled).
log_action = "Batch deleted"
if either_existed and approve_changes:
self.approve_changes()
log_action = "Deleted"
logger.info(
f"{log_action} record '{recipe.id}' of recipe {recipe.approved_revision.name!r}"
)
def approve_changes(self):
"""
Approve the changes made in the workspace collection.
.. note::
This only works because multi-signoff is disabled for the Normandy recipes
in configuration (see :ref:`remote-settings-install`)
"""
if self.client is None:
return # no-op if disabled.
try:
self.client.patch_collection(
id=rs_settings.CAPABILITIES_COLLECTION_ID,
data=APPROVE_CHANGES_FLAG,
bucket=rs_settings.WORKSPACE_BUCKET_ID,
)
logger.info("Changes were approved.")
except kinto_http.exceptions.KintoException:
# Approval failed unexpectedly.
# The changes in the `main-workspace` bucket must be reverted.
self.client.patch_collection(
id=rs_settings.CAPABILITIES_COLLECTION_ID,
data=ROLLBACK_CHANGES_FLAG,
bucket=rs_settings.WORKSPACE_BUCKET_ID,
)
raise
|
mpl-2.0
|
34b143a423a75644a2ca83f9b7e6a801
| 35.780591
| 103
| 0.614087
| 4.571054
| false
| true
| false
| false
|
mozilla/normandy
|
normandy/recipes/migrations/0009_auto_20180510_2328.py
|
1
|
1037
|
# Generated by Django 2.0.5 on 2018-05-10 23:28
from django.db import migrations
def enabled_to_enabled_state(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
EnabledState = apps.get_model("recipes", "EnabledState")
for recipe in Recipe.objects.filter(enabled=True):
if recipe.approved_revision:
es = EnabledState.objects.create(revision=recipe.approved_revision, enabled=True)
es.current_for_revision.add(recipe.approved_revision)
def enabled_state_to_enabled(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
for recipe in Recipe.objects.exclude(approved_revision=None):
enabled_state = recipe.approved_revision.enabled_state
if enabled_state and enabled_state.enabled:
recipe.enabled = True
recipe.save()
class Migration(migrations.Migration):
dependencies = [("recipes", "0008_auto_20180510_2252")]
operations = [migrations.RunPython(enabled_to_enabled_state, enabled_state_to_enabled)]
|
mpl-2.0
|
c93381001079413db6083e24aca3a9d3
| 33.566667
| 93
| 0.702025
| 3.743682
| false
| false
| false
| false
|
mozilla/normandy
|
normandy/studies/tests/__init__.py
|
1
|
4038
|
import factory
import json
import tempfile
import zipfile
from factory.django import DjangoModelFactory
from faker import Faker
from normandy.base.tests import FuzzyUnicode
from normandy.studies.models import Extension
INSTALL_RDF_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<RDF xmlns="http://w3.org/1999/02/22-rdf-syntax-ns#" xmlns:em="http://mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:type>2</em:type>
<em:bootstrap>true</em:bootstrap>
<em:unpack>false</em:unpack>
<em:multiprocessCompatible>true</em:multiprocessCompatible>
{}
<em:targetApplication>
<Description>
<em:id>{{ec8030f7-c20a-464f-9b0e-13a3a9e97384}}</em:id>
<em:minVersion>52.0</em:minVersion>
<em:maxVersion>*</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
"""
class XPIFileFactory(object):
def __init__(self, signed=True):
# Generate a unique random path for the new XPI file
f, self._path = tempfile.mkstemp(suffix=".xpi")
# Create a blank zip file on disk
zf = zipfile.ZipFile(self.path, mode="w")
zf.close()
if signed:
self.add_file("META-INF/manifest.mf", b"")
self.add_file("META-INF/mozilla.rsa", b"")
self.add_file("META-INF/mozilla.sf", b"")
@property
def path(self):
return self._path
def add_file(self, filename, data):
with zipfile.ZipFile(self.path, mode="a") as zf:
with zf.open(filename, mode="w") as f:
f.write(data)
def open(self, mode="rb"):
return open(self.path, mode=mode)
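# Usage sketch: build a minimal signed XPI on disk and read it back.
#   xpi = XPIFileFactory(signed=True)
#   xpi.add_file("manifest.json", b"{}")
#   with xpi.open() as f:
#       raw_bytes = f.read()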
class WebExtensionFileFactory(XPIFileFactory):
def __init__(self, signed=True, from_file=None, gecko_id=None, overwrite_data=None):
super().__init__(signed=signed)
if not gecko_id:
gecko_id = f"{Faker().md5()}@normandy.mozilla.org"
if from_file:
self._manifest = json.load(from_file)
else:
self._manifest = {
"manifest_version": 2,
"name": "normandy test addon",
"version": "0.1",
"description": "This is an add-on for us in Normandy's tests",
"applications": {"gecko": {"id": gecko_id}},
}
if overwrite_data:
self._manifest.update(overwrite_data)
self.save_manifest()
@property
def manifest(self):
return self._manifest
def save_manifest(self):
self.add_file("manifest.json", json.dumps(self.manifest).encode())
def update_manifest(self, data):
self._manifest.update(data)
self.save_manifest()
def replace_manifest(self, data):
self._manifest = data
self.save_manifest()
class LegacyAddonFileFactory(XPIFileFactory):
def __init__(self, signed=True, from_file=None, addon_id=None, overwrite_data=None):
super().__init__(signed=signed)
if not addon_id:
name = Faker().md5()
addon_id = f"{name}@normandy.mozilla.org"
if from_file:
with open(from_file, "rb") as f:
self.add_file("install.rdf", f.read())
else:
data = {
"id": addon_id,
"version": "0.1",
"name": "Signed Bootstrap Mozilla Extension Example",
"description": "Example of a bootstrapped addon",
}
if overwrite_data:
data.update(overwrite_data)
self.generate_install_rdf(data)
def generate_install_rdf(self, data):
insert = ""
for k in data:
insert += "<em:{}>{}</em:{}>\n".format(k, data[k], k)
self.add_file("install.rdf", INSTALL_RDF_TEMPLATE.format(insert).encode())
class ExtensionFactory(DjangoModelFactory):
name = FuzzyUnicode()
xpi = factory.django.FileField(from_func=lambda: WebExtensionFileFactory().open())
class Meta:
model = Extension
|
mpl-2.0
|
193a501d87316d4193e4f9305219f558
| 28.911111
| 96
| 0.586924
| 3.628032
| false
| false
| false
| false
|
mozilla/normandy
|
normandy/recipes/management/commands/initial_data.py
|
1
|
1928
|
from django.core.management.base import BaseCommand
from django_countries import countries
from normandy.recipes.models import Channel, Country, WindowsVersion
class Command(BaseCommand):
"""
Adds some helpful initial data to the site's database. If matching
data already exists, it should _not_ be overwritten, making this
safe to run multiple times.
This exists instead of data migrations so that test runs do not load
this data into the test database.
If this file grows too big, we should consider finding a library or
coming up with a more robust way of adding this data.
"""
help = "Adds initial data to database"
def handle(self, *args, **options):
self.add_release_channels()
self.add_countries()
self.add_windows_versions()
def add_release_channels(self):
self.stdout.write("Adding Release Channels...", ending="")
channels = {
"release": "Release",
"beta": "Beta",
"aurora": "Developer Edition",
"nightly": "Nightly",
}
for slug, name in channels.items():
Channel.objects.update_or_create(slug=slug, defaults={"name": name})
self.stdout.write("Done")
def add_countries(self):
self.stdout.write("Adding Countries...", ending="")
for code, name in countries:
Country.objects.update_or_create(code=code, defaults={"name": name})
self.stdout.write("Done")
def add_windows_versions(self):
self.stdout.write("Adding Windows Versions...", ending="")
versions = [
(6.1, "Windows 7"),
(6.2, "Windows 8"),
(6.3, "Windows 8.1"),
(10.0, "Windows 10"),
]
for nt_version, name in versions:
WindowsVersion.objects.update_or_create(nt_version=nt_version, defaults={"name": name})
self.stdout.write("Done")
|
mpl-2.0
|
38c6b007ae090d295399b47eca56512a
| 32.824561
| 99
| 0.61722
| 4.17316
| false
| false
| false
| false
|
mozilla/normandy
|
contract-tests/v1_api/test_performance.py
|
1
|
3083
|
from urllib.parse import urljoin
import html5lib
import pytest
"""These are paths hit by self repair that need to be very fast"""
HOT_PATHS = [
"/en-US/repair",
"/en-US/repair/",
"/api/v1/recipe/?enabled=1",
"/api/v1/recipe/signed/?enabled=1",
"/api/v1/action/",
]
@pytest.mark.parametrize("path", HOT_PATHS)
class TestHotPaths(object):
"""
Test for performance-enhancing properties of the site.
This file does not test performance by measuring runtimes and throughput.
Instead it tests for markers of features that would speed up or slow down the
site, such as cache headers.
"""
def test_no_redirects(self, conf, requests_session, path):
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
assert 200 <= r.status_code < 300
def test_no_vary_cookie(self, conf, requests_session, path, only_readonly):
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
assert "cookie" not in r.headers.get("vary", "").lower()
def test_cache_headers(self, conf, requests_session, path, only_readonly):
if path.startswith("/api/"):
pytest.xfail("caching temporarily hidden on api by nginx")
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
cache_control = r.headers.get("cache-control")
assert cache_control is not None
# parse cache-control header.
parts = [part.strip() for part in cache_control.split(",")]
max_age = [part for part in parts if part.startswith("max-age=")][0]
max_age_seconds = int(max_age.split("=")[1])
assert "public" in parts
assert max_age_seconds > 0
def test_static_cache_headers(conf, requests_session):
"""Test that all scripts included from self-repair have long lived cache headers"""
req = requests_session.get(conf.getoption("server") + "/en-US/repair")
req.raise_for_status()
document = html5lib.parse(req.content, treebuilder="dom")
scripts = document.getElementsByTagName("script")
for script in scripts:
src = script.getAttribute("src")
url = urljoin(conf.getoption("server"), src)
script_req = requests_session.get(url)
script_req.raise_for_status()
cache_control = parse_cache_control(script_req.headers["cache-control"])
assert cache_control["public"], f"Cache-control: public for {url}"
ONE_YEAR = 31_536_000
assert cache_control["max-age"] >= ONE_YEAR, f"Cache-control: max-age > 1 year for {url}"
assert cache_control["immutable"], f"Cache-control: immutable for {url}"
def parse_cache_control(header):
parsed = {}
parts = header.split(",")
for part in parts:
part = part.strip()
if "=" in part:
key, val = part.split("=", 1)
try:
val = int(val)
except ValueError:
pass
parsed[key] = val
else:
parsed[part] = True
return parsed
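# Example: parse_cache_control("public, max-age=31536000, immutable")
# returns {"public": True, "max-age": 31536000, "immutable": True}.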
|
mpl-2.0
|
5cebd95963e1d0f119d1be4d966f587c
| 35.270588
| 97
| 0.626014
| 3.759756
| false
| true
| false
| false
|
mozilla/normandy
|
normandy/recipes/migrations/0004_auto_20180502_2340.py
|
1
|
5164
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-02 23:40
# flake8: noqa
from __future__ import unicode_literals
import hashlib
from django.db import migrations
def create_tmp_from_revision(apps, revision, parent=None):
ApprovalRequest = apps.get_model("recipes", "ApprovalRequest")
TmpRecipeRevision = apps.get_model("recipes", "TmpRecipeRevision")
tmp = TmpRecipeRevision(
created=revision.created,
updated=revision.updated,
comment=revision.comment,
name=revision.name,
arguments_json=revision.arguments_json,
extra_filter_expression=revision.extra_filter_expression,
identicon_seed=revision.identicon_seed,
action=revision.action,
parent=parent,
recipe=revision.recipe,
user=revision.user,
)
tmp.save()
if revision.approved_for_recipe.count():
tmp.approved_for_recipe.add(revision.approved_for_recipe.get())
if revision.latest_for_recipe.count():
tmp.latest_for_recipe.add(revision.latest_for_recipe.get())
try:
approval_request = revision.approval_request
approval_request.tmp_revision = tmp
approval_request.save()
except ApprovalRequest.DoesNotExist:
pass
for channel in revision.channels.all():
tmp.channels.add(channel)
for country in revision.countries.all():
tmp.countries.add(country)
for locale in revision.locales.all():
tmp.locales.add(locale)
return tmp
def copy_revisions_to_tmp(apps, schema_editor):
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
for revision in RecipeRevision.objects.filter(parent=None):
current_rev = revision
parent_tmp = create_tmp_from_revision(apps, current_rev)
try:
while current_rev.child:
parent_tmp = create_tmp_from_revision(apps, current_rev.child, parent=parent_tmp)
current_rev = current_rev.child
except RecipeRevision.DoesNotExist:
pass
def get_filter_expression(revision):
parts = []
if revision.locales.count():
locales = ", ".join(["'{}'".format(l.code) for l in revision.locales.all()])
parts.append("normandy.locale in [{}]".format(locales))
if revision.countries.count():
countries = ", ".join(["'{}'".format(c.code) for c in revision.countries.all()])
parts.append("normandy.country in [{}]".format(countries))
if revision.channels.count():
channels = ", ".join(["'{}'".format(c.slug) for c in revision.channels.all()])
parts.append("normandy.channel in [{}]".format(channels))
if revision.extra_filter_expression:
parts.append(revision.extra_filter_expression)
expression = ") && (".join(parts)
return "({})".format(expression) if len(parts) > 1 else expression
def hash(revision):
data = "{}{}{}{}{}{}".format(
revision.recipe.id,
revision.created,
revision.name,
revision.action.id,
revision.arguments_json,
get_filter_expression(revision),
)
return hashlib.sha256(data.encode()).hexdigest()
def create_revision_from_tmp(apps, tmp, parent=None):
ApprovalRequest = apps.get_model("recipes", "ApprovalRequest")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
rev = RecipeRevision(
created=tmp.created,
updated=tmp.updated,
comment=tmp.comment,
name=tmp.name,
arguments_json=tmp.arguments_json,
extra_filter_expression=tmp.extra_filter_expression,
identicon_seed=tmp.identicon_seed,
action=tmp.action,
parent=parent,
recipe=tmp.recipe,
user=tmp.user,
)
initial_id = hash(tmp)
rev.id = initial_id
rev.save()
if tmp.approved_for_recipe.count():
rev.approved_for_recipe.add(tmp.approved_for_recipe.get())
if tmp.latest_for_recipe.count():
rev.latest_for_recipe.add(tmp.latest_for_recipe.get())
try:
approval_request = tmp.approval_request
approval_request.revision = rev
approval_request.save()
except ApprovalRequest.DoesNotExist:
pass
for channel in tmp.channels.all():
rev.channels.add(channel)
for country in tmp.countries.all():
rev.countries.add(country)
for locale in tmp.locales.all():
rev.locales.add(locale)
return rev
def copy_tmp_to_revisions(apps, schema_editor):
TmpRecipeRevision = apps.get_model("recipes", "TmpRecipeRevision")
for tmp in TmpRecipeRevision.objects.filter(parent=None):
current_tmp = tmp
parent_rev = create_revision_from_tmp(apps, current_tmp)
try:
while current_tmp.child:
parent_rev = create_revision_from_tmp(apps, current_tmp.child, parent=parent_rev)
current_tmp = current_tmp.child
except TmpRecipeRevision.DoesNotExist:
pass
class Migration(migrations.Migration):
dependencies = [("recipes", "0003_tmpreciperevision")]
operations = [migrations.RunPython(copy_revisions_to_tmp, copy_tmp_to_revisions)]
|
mpl-2.0
|
b08fd3e120b5fcd53bc0145f4b760be4
| 28.849711
| 97
| 0.647366
| 3.819527
| false
| false
| false
| false
|
mozilla/normandy
|
normandy/recipes/api/v3/serializers.py
|
1
|
11345
|
from rest_framework import serializers
from factory.fuzzy import FuzzyText
from normandy.base.api.v3.serializers import UserSerializer
from normandy.base.jexl import get_normandy_jexl
from normandy.recipes import filters
from normandy.recipes.api.fields import (
ActionImplementationHyperlinkField,
FilterObjectField,
)
from normandy.recipes.models import (
Action,
ApprovalRequest,
EnabledState,
Recipe,
RecipeRevision,
Signature,
)
from normandy.recipes.validators import JSONSchemaValidator
class CustomizableSerializerMixin:
"""Serializer Mixin that allows callers to exclude fields on instance of this serializer."""
def __init__(self, *args, **kwargs):
exclude_fields = kwargs.pop("exclude_fields", [])
super().__init__(*args, **kwargs)
if exclude_fields:
for field in exclude_fields:
self.fields.pop(field)
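# Example from this module: RecipeRevisionSerializer declares
# EnabledStateSerializer(many=True, exclude_fields=["revision_id"]) to drop
# the redundant revision_id field from the nested enabled states.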
class ActionSerializer(serializers.ModelSerializer):
arguments_schema = serializers.JSONField()
implementation_url = ActionImplementationHyperlinkField()
class Meta:
model = Action
fields = ["arguments_schema", "name", "id", "implementation_url"]
class ApprovalRequestSerializer(serializers.ModelSerializer):
approver = UserSerializer()
created = serializers.DateTimeField(read_only=True)
creator = UserSerializer()
revision = serializers.SerializerMethodField(read_only=True)
class Meta:
model = ApprovalRequest
fields = ["approved", "approver", "comment", "created", "creator", "id", "revision"]
def get_revision(self, instance):
serializer = RecipeRevisionLinkSerializer(instance.revision)
return serializer.data
class EnabledStateSerializer(CustomizableSerializerMixin, serializers.ModelSerializer):
creator = UserSerializer()
class Meta:
model = EnabledState
fields = ["id", "revision_id", "created", "creator", "enabled", "carryover_from"]
class RecipeRevisionSerializer(serializers.ModelSerializer):
action = serializers.SerializerMethodField(read_only=True)
approval_request = ApprovalRequestSerializer(read_only=True)
capabilities = serializers.ListField(read_only=True)
comment = serializers.CharField(required=False)
creator = UserSerializer(source="user", read_only=True)
date_created = serializers.DateTimeField(source="created", read_only=True)
enabled_states = EnabledStateSerializer(many=True, exclude_fields=["revision_id"])
filter_object = serializers.ListField(child=FilterObjectField())
recipe = serializers.SerializerMethodField(read_only=True)
class Meta:
model = RecipeRevision
fields = [
"action",
"approval_request",
"arguments",
"experimenter_slug",
"capabilities",
"comment",
"creator",
"date_created",
"enabled_states",
"enabled",
"extra_capabilities",
"extra_filter_expression",
"filter_expression",
"filter_object",
"id",
"identicon_seed",
"metadata",
"name",
"recipe",
"updated",
]
def get_recipe(self, instance):
serializer = RecipeLinkSerializer(instance.recipe)
return serializer.data
def get_action(self, instance):
serializer = ActionSerializer(
instance.action, read_only=True, context={"request": self.context.get("request")}
)
return serializer.data
class SignatureSerializer(serializers.ModelSerializer):
timestamp = serializers.DateTimeField(read_only=True)
signature = serializers.ReadOnlyField()
x5u = serializers.ReadOnlyField()
public_key = serializers.ReadOnlyField()
class Meta:
model = Signature
fields = ["timestamp", "signature", "x5u", "public_key"]
class RecipeSerializer(CustomizableSerializerMixin, serializers.ModelSerializer):
# read-only fields
approved_revision = RecipeRevisionSerializer(read_only=True)
latest_revision = RecipeRevisionSerializer(read_only=True)
signature = SignatureSerializer(read_only=True)
uses_only_baseline_capabilities = serializers.BooleanField(
source="latest_revision.uses_only_baseline_capabilities", read_only=True
)
# write-only fields
action_id = serializers.PrimaryKeyRelatedField(
source="action", queryset=Action.objects.all(), write_only=True
)
arguments = serializers.JSONField(write_only=True)
extra_filter_expression = serializers.CharField(
required=False, allow_blank=True, write_only=True
)
filter_object = serializers.ListField(
child=FilterObjectField(), required=False, write_only=True
)
name = serializers.CharField(write_only=True)
identicon_seed = serializers.CharField(required=False, write_only=True)
comment = serializers.CharField(required=False, write_only=True)
experimenter_slug = serializers.CharField(
required=False, write_only=True, allow_null=True, allow_blank=True
)
extra_capabilities = serializers.ListField(required=False, write_only=True)
class Meta:
model = Recipe
fields = [
# read-only
"approved_revision",
"id",
"latest_revision",
"signature",
"uses_only_baseline_capabilities",
# write-only
"action_id",
"arguments",
"extra_filter_expression",
"filter_object",
"name",
"identicon_seed",
"comment",
"experimenter_slug",
"extra_capabilities",
]
def get_action(self, instance):
serializer = ActionSerializer(
instance.latest_revision.action,
read_only=True,
context={"request": self.context.get("request")},
)
return serializer.data
def update(self, instance, validated_data):
request = self.context.get("request")
if request and request.user:
validated_data["user"] = request.user
instance.revise(**validated_data)
return instance
def create(self, validated_data):
request = self.context.get("request")
if request and request.user:
validated_data["user"] = request.user
if "identicon_seed" not in validated_data:
validated_data["identicon_seed"] = f"v1:{FuzzyText().fuzz()}"
recipe = Recipe.objects.create()
return self.update(recipe, validated_data)
def validate_extra_filter_expression(self, value):
if value:
jexl = get_normandy_jexl()
errors = list(jexl.validate(value))
if errors:
raise serializers.ValidationError(errors)
return value
def validate(self, data):
data = super().validate(data)
action = data.get("action")
if action is None:
action = self.instance.latest_revision.action
arguments = data.get("arguments")
if arguments is not None:
# Ensure the value is a dict
if not isinstance(arguments, dict):
raise serializers.ValidationError({"arguments": "Must be an object."})
# Get the schema associated with the selected action
schema = action.arguments_schema
schemaValidator = JSONSchemaValidator(schema)
errorResponse = {}
errors = sorted(schemaValidator.iter_errors(arguments), key=lambda e: e.path)
# Loop through ValidationErrors returned by JSONSchema
# Each error contains a message and a path attribute
# message: string human-readable error explanation
# path: list containing path to offending element
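# e.g. an error at arguments["surveys"][0]["weight"] has the path
# ["surveys", 0, "weight"] and is nested below into
# {"surveys": {0: {"weight": message}}}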
for error in errors:
currentLevel = errorResponse
# Loop through the path of the current error
# e.g. ['surveys'][0]['weight']
for index, path in enumerate(error.path):
# If this key already exists in our error response, step into it
if path in currentLevel:
currentLevel = currentLevel[path]
continue
else:
# If we haven't reached the end of the path, add this path
# as a key in our error response object and step into it
if index < len(error.path) - 1:
currentLevel[path] = {}
currentLevel = currentLevel[path]
continue
# If we've reached the final path, set the error message
else:
currentLevel[path] = error.message
if errorResponse:
raise serializers.ValidationError({"arguments": errorResponse})
if self.instance is None:
if data.get("extra_filter_expression", "").strip() == "":
if not data.get("filter_object"):
raise serializers.ValidationError(
"one of extra_filter_expression or filter_object is required"
)
else:
if "extra_filter_expression" in data or "filter_object" in data:
# If either is attempted to be updated, at least one of them must be truthy.
if not data.get("extra_filter_expression", "").strip() and not data.get(
"filter_object"
):
raise serializers.ValidationError(
"if extra_filter_expression is blank, "
"at least one filter_object is required"
)
return data
def validate_filter_object(self, value):
if not isinstance(value, list):
raise serializers.ValidationError(
{"non field errors": ["filter_object must be a list."]}
)
errors = {}
for i, obj in enumerate(value):
if not isinstance(obj, dict):
errors[i] = {"non field errors": ["filter_object members must be objects."]}
continue
if "type" not in obj:
errors[i] = {"type": ["This field is required."]}
break
Filter = filters.by_type.get(obj["type"])
if Filter is not None:
filter = Filter(data=obj)
if not filter.is_valid():
errors[i] = filter.errors
else:
errors[i] = {"type": [f'Unknown filter object type "{obj["type"]}".']}
if errors:
raise serializers.ValidationError(errors)
return value
class RecipeLinkSerializer(RecipeSerializer):
class Meta(RecipeSerializer.Meta):
fields = ["approved_revision_id", "id", "latest_revision_id"]
class RecipeRevisionLinkSerializer(RecipeRevisionSerializer):
recipe_id = serializers.SerializerMethodField(read_only=True)
class Meta(RecipeSerializer.Meta):
fields = ["id", "recipe_id"]
def get_recipe_id(self, instance):
return instance.recipe.id
|
mpl-2.0
|
9e69db44885b75be18a03b20bf883646
| 34.676101
| 96
| 0.606435
| 4.634395
| false
| false
| false
| false
|
mozilla/normandy
|
contract-tests/v3_api/test_group_delete.py
|
1
|
1231
|
import uuid
from support.assertions import assert_valid_schema
from urllib.parse import urljoin
def test_group_delete(conf, requests_session, headers):
# Create a new group
data = {"name": str(uuid.uuid4())}
response = requests_session.post(
urljoin(conf.getoption("server"), "/api/v3/group/"), headers=headers, data=data
)
assert response.status_code == 201
assert_valid_schema(response.json())
group_data = response.json()
group_id = group_data["id"]
# Verify group was stored and contains expected data
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
group_data = response.json()
assert response.status_code == 200
assert_valid_schema(response.json())
# Delete the group
response = requests_session.delete(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
assert response.status_code == 204
# Verify that it no longer exists
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
assert response.status_code == 404
|
mpl-2.0
|
709834fd8468eb48cb136525254f26bc
| 33.194444
| 96
| 0.670187
| 3.799383
| false
| false
| false
| false
|
developmentseed/landsat-util
|
docs/conf.py
|
9
|
9890
|
# -*- coding: utf-8 -*-
#
# Landsat-util documentation build configuration file, created by
# sphinx-quickstart on Thu May 28 17:52:10 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from mock import Mock as MagicMock
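# Mock out heavy compiled dependencies so Sphinx autodoc can import the
# landsat package on builders (e.g. Read the Docs) where they are not installed.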
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['numpy', 'rasterio', 'scipy', 'scikit-image', 'homura', 'boto',
'termcolor', 'requests', 'python-dateutil']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
print(project_root)
import landsat
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'landsat-util'
copyright = u'2015, Development Seed'
author = u'Development Seed'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = landsat.__version__
# The full version, including alpha/beta/rc tags.
release = landsat.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Landsat-utildoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Landsat-util.tex', u'Landsat-util Documentation',
u'Development Seed', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'landsat-util', u'Landsat-util Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Landsat-util', u'Landsat-util Documentation',
author, 'Landsat-util', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
cc0-1.0
|
299f01338ac03d06f9f64eefa0b432b3
| 30.800643
| 79
| 0.705359
| 3.653491
| false
| true
| false
| false
|
rmmh/skybot
|
plugins/lastfm.py
|
3
|
2391
|
"""
The Last.fm API key is retrieved from the bot config file.
"""
from util import hook, http
api_url = "http://ws.audioscrobbler.com/2.0/?format=json"
@hook.api_key("lastfm")
@hook.command(autohelp=False)
def lastfm(inp, chan="", nick="", reply=None, api_key=None, db=None):
".lastfm <username> [dontsave] | @<nick> -- gets current or last played " "track from lastfm"
db.execute(
"create table if not exists "
"lastfm(chan, nick, user, primary key(chan, nick))"
)
if inp[0:1] == "@":
nick = inp[1:].strip()
user = None
dontsave = True
else:
user = inp
dontsave = user.endswith(" dontsave")
if dontsave:
user = user[:-9].strip().lower()
if not user:
user = db.execute(
"select user from lastfm where chan=? and nick=lower(?)", (chan, nick)
).fetchone()
if not user:
return lastfm.__doc__
user = user[0]
response = http.get_json(
api_url, method="user.getrecenttracks", api_key=api_key, user=user, limit=1
)
if "error" in response:
return "error: %s" % response["message"]
if (
not "track" in response["recenttracks"]
or len(response["recenttracks"]["track"]) == 0
):
return "no recent tracks for user \x02%s\x0F found" % user
tracks = response["recenttracks"]["track"]
if isinstance(tracks, list):
# if the user is listening to something, the tracks entry is a list
# the first item is the current track
track = tracks[0]
status = "current track"
elif isinstance(tracks, dict):
# otherwise, they aren't listening to anything right now, and
# the tracks entry is a dict representing the most recent track
track = tracks
status = "last track"
else:
return "error parsing track listing"
title = track["name"]
album = track["album"]["#text"]
artist = track["artist"]["#text"]
ret = "\x02%s\x0F's %s - \x02%s\x0f" % (user, status, title)
if artist:
ret += " by \x02%s\x0f" % artist
if album:
ret += " on \x02%s\x0f" % album
reply(ret)
if inp and not dontsave:
db.execute(
"insert or replace into lastfm(chan, nick, user) " "values (?, ?, ?)",
(chan, nick.lower(), inp),
)
db.commit()
|
unlicense
|
636c141db11c52dd6b085daa4d1fa441
| 27.129412
| 97
| 0.563363
| 3.531758
| false
| false
| false
| false
|
rmmh/skybot
|
plugins/google.py
|
2
|
1308
|
from __future__ import unicode_literals
import random
from util import hook, http
def api_get(query, key, is_image=None, num=1):
url = (
"https://www.googleapis.com/customsearch/v1?cx=007629729846476161907:ud5nlxktgcw"
"&fields=items(title,link,snippet)&safe=off&nfpr=1"
+ ("&searchType=image" if is_image else "")
)
return http.get_json(url, key=key, q=query, num=num)
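# Example (hypothetical): api_get("mozilla", api_key) returns parsed JSON whose
# "items" list holds up to num results with title, link, and snippet fields.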
@hook.api_key("google")
@hook.command("can i get a picture of")
@hook.command("can you grab me a picture of")
@hook.command("give me a print out of")
@hook.command
def gis(inp, api_key=None):
""".gis <term> -- finds an image using google images (safesearch off)"""
parsed = api_get(inp, api_key, is_image=True, num=10)
if "items" not in parsed:
return "no images found"
return random.choice(parsed["items"])["link"]
@hook.api_key("google")
@hook.command("g")
@hook.command
def google(inp, api_key=None):
""".g/.google <query> -- returns first google search result"""
parsed = api_get(inp, api_key)
if "items" not in parsed:
return "no results found"
out = '{link} -- \x02{title}\x02: "{snippet}"'.format(**parsed["items"][0])
out = " ".join(out.split())
if len(out) > 300:
out = out[: out.rfind(" ")] + '..."'
return out
|
unlicense
|
f118032a0344f7392e8812fe3793d67c
| 26.829787
| 89
| 0.626147
| 3.056075
| false
| false
| false
| false
|
rmmh/skybot
|
plugins/util/timesince.py
|
3
|
4139
|
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, ("year", "years")),
(60 * 60 * 24 * 30, ("month", "months")),
(60 * 60 * 24 * 7, ("week", "weeks")),
(60 * 60 * 24, ("day", "days")),
(60 * 60, ("hour", "hours")),
(60, ("minute", "minutes")),
)
# Convert int or float (unix epoch) to datetime.datetime for comparison
if isinstance(d, int) or isinstance(d, float):
d = datetime.datetime.fromtimestamp(d)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
now = datetime.datetime.now()
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return "0 " + "minutes"
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
if count == 1:
s = "%(number)d %(type)s" % {"number": count, "type": name[0]}
else:
s = "%(number)d %(type)s" % {"number": count, "type": name[1]}
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
if count2 == 1:
s += ", %d %s" % (count2, name2[0])
else:
s += ", %d %s" % (count2, name2[1])
return s
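# A worked example of the two-adjacent-units output (dates are illustrative):
#
#     >>> import datetime
#     >>> timesince(datetime.datetime(2020, 1, 1), datetime.datetime(2020, 1, 18))
#     '2 weeks, 3 days'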
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
if not now:
now = datetime.datetime.now()
return timesince(now, d)
|
unlicense
|
aa53f92f5fdd6e27a86d9046dc52bf9f
| 39.578431
| 80
| 0.650882
| 3.930674
| false
| false
| false
| false
|
rmmh/skybot
|
plugins/mtg.py
|
3
|
2470
|
from __future__ import print_function
from builtins import range
from util import hook, http
import random
def card_search(name):
matching_cards = http.get_json(
"https://api.magicthegathering.io/v1/cards", name=name
)
for card in matching_cards["cards"]:
if card["name"].lower() == name.lower():
return card
return random.choice(matching_cards["cards"])
@hook.command
def mtg(inp, say=None):
""".mtg <name> - Searches for Magic the Gathering card given <name>"""
try:
card = card_search(inp)
except IndexError:
return "Card not found."
symbols = {
"{0}": "0",
"{1}": "1",
"{2}": "2",
"{3}": "3",
"{4}": "4",
"{5}": "5",
"{6}": "6",
"{7}": "7",
"{8}": "8",
"{9}": "9",
"{10}": "10",
"{11}": "11",
"{12}": "12",
"{13}": "13",
"{14}": "14",
"{15}": "15",
"{16}": "16",
"{17}": "17",
"{18}": "18",
"{19}": "19",
"{20}": "20",
"{T}": "\u27F3",
"{S}": "\u2744",
"{Q}": "\u21BA",
"{C}": "\u27E1",
"{W}": "W",
"{U}": "U",
"{B}": "B",
"{R}": "R",
"{G}": "G",
"{W/P}": "\u03D5",
"{U/P}": "\u03D5",
"{B/P}": "\u03D5",
"{R/P}": "\u03D5",
"{G/P}": "\u03D5",
"{X}": "X",
"\n": " ",
}
results = {
"name": card["name"],
"type": card["type"],
"cost": card.get("manaCost", ""),
"text": card.get("text", ""),
"power": card.get("power"),
"toughness": card.get("toughness"),
"loyalty": card.get("loyalty"),
"multiverseid": card.get("multiverseid"),
}
for fragment, rep in symbols.items():
results["text"] = results["text"].replace(fragment, rep)
results["cost"] = results["cost"].replace(fragment, rep)
template = ["{name} -"]
template.append("{type}")
template.append("- {cost} |")
if results["loyalty"]:
template.append("{loyalty} Loyalty |")
if results["power"]:
template.append("{power}/{toughness} |")
template.append(
"{text} | http://gatherer.wizards.com/Pages/Card/Details.aspx?multiverseid={multiverseid}"
)
return " ".join(template).format(**results)
if __name__ == "__main__":
print(card_search("Black Lotus"))
print(mtg("Black Lotus"))
|
unlicense
|
32c6db1674583320bea728226b9561ab
| 25
| 98
| 0.448178
| 3.130545
| false
| false
| false
| false
|
pytube/pytube
|
pytube/cipher.py
|
1
|
22529
|
"""
This module contains all logic necessary to decipher the signature.
YouTube's strategy to restrict downloading videos is to send a ciphered version
of the signature to the client, along with the decryption algorithm obfuscated
in JavaScript. For the clients to play the videos, JavaScript must take the
ciphered version, cycle it through a series of "transform functions," and then
sign the media URL with the output.
This module is responsible for (1) finding and extracting those "transform
functions" (2) maps them to Python equivalents and (3) taking the ciphered
signature and decoding it.
"""
import logging
import re
from itertools import chain
from typing import Any, Callable, Dict, List, Optional, Tuple
from pytube.exceptions import ExtractError, RegexMatchError
from pytube.helpers import cache, regex_search
from pytube.parser import find_object_from_startpoint, throttling_array_split
logger = logging.getLogger(__name__)
class Cipher:
def __init__(self, js: str):
self.transform_plan: List[str] = get_transform_plan(js)
var_regex = re.compile(r"^\w+\W")
var_match = var_regex.search(self.transform_plan[0])
if not var_match:
raise RegexMatchError(
caller="__init__", pattern=var_regex.pattern
)
var = var_match.group(0)[:-1]
self.transform_map = get_transform_map(js, var)
self.js_func_patterns = [
r"\w+\.(\w+)\(\w,(\d+)\)",
r"\w+\[(\"\w+\")\]\(\w,(\d+)\)"
]
self.throttling_plan = get_throttling_plan(js)
self.throttling_array = get_throttling_function_array(js)
self.calculated_n = None
def calculate_n(self, initial_n: list):
"""Converts n to the correct value to prevent throttling."""
if self.calculated_n:
return self.calculated_n
# First, update all instances of 'b' with the list(initial_n)
for i in range(len(self.throttling_array)):
if self.throttling_array[i] == 'b':
self.throttling_array[i] = initial_n
for step in self.throttling_plan:
curr_func = self.throttling_array[int(step[0])]
if not callable(curr_func):
logger.debug(f'{curr_func} is not callable.')
logger.debug(f'Throttling array:\n{self.throttling_array}\n')
raise ExtractError(f'{curr_func} is not callable.')
first_arg = self.throttling_array[int(step[1])]
if len(step) == 2:
curr_func(first_arg)
elif len(step) == 3:
second_arg = self.throttling_array[int(step[2])]
curr_func(first_arg, second_arg)
self.calculated_n = ''.join(initial_n)
return self.calculated_n
def get_signature(self, ciphered_signature: str) -> str:
"""Decipher the signature.
Taking the ciphered signature, applies the transform functions.
:param str ciphered_signature:
The ciphered signature sent in the ``player_config``.
:rtype: str
:returns:
Decrypted signature required to download the media content.
"""
signature = list(ciphered_signature)
for js_func in self.transform_plan:
name, argument = self.parse_function(js_func) # type: ignore
signature = self.transform_map[name](signature, argument)
logger.debug(
"applied transform function\n"
"output: %s\n"
"js_function: %s\n"
"argument: %d\n"
"function: %s",
"".join(signature),
name,
argument,
self.transform_map[name],
)
return "".join(signature)
@cache
def parse_function(self, js_func: str) -> Tuple[str, int]:
"""Parse the Javascript transform function.
Break a JavaScript transform function down into a two element ``tuple``
containing the function name and some integer-based argument.
:param str js_func:
The JavaScript version of the transform function.
:rtype: tuple
:returns:
two element tuple containing the function name and an argument.
**Example**:
parse_function('DE.AJ(a,15)')
('AJ', 15)
"""
logger.debug("parsing transform function")
for pattern in self.js_func_patterns:
regex = re.compile(pattern)
parse_match = regex.search(js_func)
if parse_match:
fn_name, fn_arg = parse_match.groups()
return fn_name, int(fn_arg)
raise RegexMatchError(
caller="parse_function", pattern="js_func_patterns"
)
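# A minimal usage sketch (illustrative; in practice the base.js contents and the
# ciphered values come from YouTube's player assets, fetched elsewhere in pytube):
#
#     cipher = Cipher(js=base_js_contents)
#     signature = cipher.get_signature(ciphered_signature="...")
#     n = cipher.calculate_n(list("some_throttling_param"))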
def get_initial_function_name(js: str) -> str:
"""Extract the name of the function responsible for computing the signature.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
Function name from regex match
"""
function_patterns = [
r"\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # noqa: E501
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # noqa: E501
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r"\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(",
r"yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
]
logger.debug("finding initial function name")
for pattern in function_patterns:
regex = re.compile(pattern)
function_match = regex.search(js)
if function_match:
logger.debug("finished regex search, matched: %s", pattern)
return function_match.group(1)
raise RegexMatchError(
caller="get_initial_function_name", pattern="multiple"
)
def get_transform_plan(js: str) -> List[str]:
"""Extract the "transform plan".
The "transform plan" is the functions that the ciphered signature is
cycled through to obtain the actual signature.
:param str js:
The contents of the base.js asset file.
**Example**:
['DE.AJ(a,15)',
'DE.VR(a,3)',
'DE.AJ(a,51)',
'DE.VR(a,3)',
'DE.kT(a,51)',
'DE.kT(a,8)',
'DE.VR(a,3)',
'DE.kT(a,21)']
"""
name = re.escape(get_initial_function_name(js))
pattern = r"%s=function\(\w\){[a-z=\.\(\"\)]*;(.*);(?:.+)}" % name
logger.debug("getting transform plan")
return regex_search(pattern, js, group=1).split(";")
def get_transform_object(js: str, var: str) -> List[str]:
"""Extract the "transform object".
The "transform object" contains the function definitions referenced in the
"transform plan". The ``var`` argument is the obfuscated variable name
which contains these functions, for example, given the function call
``DE.AJ(a,15)`` returned by the transform plan, "DE" would be the var.
:param str js:
The contents of the base.js asset file.
:param str var:
The obfuscated variable name that stores an object with all functions
that descrambles the signature.
**Example**:
>>> get_transform_object(js, 'DE')
['AJ:function(a){a.reverse()}',
'VR:function(a,b){a.splice(0,b)}',
'kT:function(a,b){var c=a[0];a[0]=a[b%a.length];a[b]=c}']
"""
pattern = r"var %s={(.*?)};" % re.escape(var)
logger.debug("getting transform object")
regex = re.compile(pattern, flags=re.DOTALL)
transform_match = regex.search(js)
if not transform_match:
raise RegexMatchError(caller="get_transform_object", pattern=pattern)
return transform_match.group(1).replace("\n", " ").split(", ")
def get_transform_map(js: str, var: str) -> Dict:
"""Build a transform function lookup.
Build a lookup table of obfuscated JavaScript function names to the
Python equivalents.
:param str js:
The contents of the base.js asset file.
:param str var:
The obfuscated variable name that stores an object with all functions
that descrambles the signature.
"""
transform_object = get_transform_object(js, var)
mapper = {}
for obj in transform_object:
# AJ:function(a){a.reverse()} => AJ, function(a){a.reverse()}
name, function = obj.split(":", 1)
fn = map_functions(function)
mapper[name] = fn
return mapper
def get_throttling_function_name(js: str) -> str:
"""Extract the name of the function that computes the throttling parameter.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
The name of the function used to compute the throttling parameter.
"""
function_patterns = [
# https://github.com/ytdl-org/youtube-dl/issues/29326#issuecomment-865985377
# https://github.com/yt-dlp/yt-dlp/commit/48416bc4a8f1d5ff07d5977659cb8ece7640dcd8
# var Bpa = [iha];
# ...
# a.C && (b = a.get("n")) && (b = Bpa[0](b), a.set("n", b),
# Bpa.length || iha("")) }};
# In the above case, `iha` is the relevant function name
r'a\.[a-zA-Z]\s*&&\s*\([a-z]\s*=\s*a\.get\("n"\)\)\s*&&\s*'
r'\([a-z]\s*=\s*([a-zA-Z0-9$]+)(\[\d+\])?\([a-z]\)',
]
logger.debug('Finding throttling function name')
for pattern in function_patterns:
regex = re.compile(pattern)
function_match = regex.search(js)
if function_match:
logger.debug("finished regex search, matched: %s", pattern)
if len(function_match.groups()) == 1:
return function_match.group(1)
idx = function_match.group(2)
if idx:
idx = idx.strip("[]")
array = re.search(
r'var {nfunc}\s*=\s*(\[.+?\]);'.format(
nfunc=re.escape(function_match.group(1))),
js
)
if array:
array = array.group(1).strip("[]").split(",")
array = [x.strip() for x in array]
return array[int(idx)]
raise RegexMatchError(
caller="get_throttling_function_name", pattern="multiple"
)
def get_throttling_function_code(js: str) -> str:
"""Extract the raw code for the throttling function.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
The name of the function used to compute the throttling parameter.
"""
# Begin by extracting the correct function name
name = re.escape(get_throttling_function_name(js))
# Identify where the function is defined
pattern_start = r"%s=function\(\w\)" % name
regex = re.compile(pattern_start)
match = regex.search(js)
# Extract the code within curly braces for the function itself, and merge any split lines
code_lines_list = find_object_from_startpoint(js, match.span()[1]).split('\n')
joined_lines = "".join(code_lines_list)
# Prepend function definition (e.g. `Dea=function(a)`)
return match.group(0) + joined_lines
def get_throttling_function_array(js: str) -> List[Any]:
"""Extract the "c" array.
:param str js:
The contents of the base.js asset file.
:returns:
The array of various integers, arrays, and functions.
"""
raw_code = get_throttling_function_code(js)
array_start = r",c=\["
array_regex = re.compile(array_start)
match = array_regex.search(raw_code)
array_raw = find_object_from_startpoint(raw_code, match.span()[1] - 1)
str_array = throttling_array_split(array_raw)
converted_array = []
for el in str_array:
try:
converted_array.append(int(el))
continue
except ValueError:
# Not an integer value.
pass
if el == 'null':
converted_array.append(None)
continue
if el.startswith('"') and el.endswith('"'):
# Convert e.g. '"abcdef"' to string without quotation marks, 'abcdef'
converted_array.append(el[1:-1])
continue
if el.startswith('function'):
mapper = (
(r"{for\(\w=\(\w%\w\.length\+\w\.length\)%\w\.length;\w--;\)\w\.unshift\(\w.pop\(\)\)}", throttling_unshift), # noqa:E501
(r"{\w\.reverse\(\)}", throttling_reverse),
(r"{\w\.push\(\w\)}", throttling_push),
(r";var\s\w=\w\[0\];\w\[0\]=\w\[\w\];\w\[\w\]=\w}", throttling_swap),
(r"case\s\d+", throttling_cipher_function),
(r"\w\.splice\(0,1,\w\.splice\(\w,1,\w\[0\]\)\[0\]\)", throttling_nested_splice), # noqa:E501
(r";\w\.splice\(\w,1\)}", js_splice),
(r"\w\.splice\(-\w\)\.reverse\(\)\.forEach\(function\(\w\){\w\.unshift\(\w\)}\)", throttling_prepend), # noqa:E501
(r"for\(var \w=\w\.length;\w;\)\w\.push\(\w\.splice\(--\w,1\)\[0\]\)}", throttling_reverse), # noqa:E501
)
found = False
for pattern, fn in mapper:
if re.search(pattern, el):
converted_array.append(fn)
found = True
if found:
continue
converted_array.append(el)
# Replace null elements with array itself
for i in range(len(converted_array)):
if converted_array[i] is None:
converted_array[i] = converted_array
return converted_array
def get_throttling_plan(js: str):
"""Extract the "throttling plan".
The "throttling plan" is a list of tuples used for calling functions
in the c array. The first element of the tuple is the index of the
function to call, and any remaining elements of the tuple are arguments
to pass to that function.
:param str js:
The contents of the base.js asset file.
:returns:
        The list of steps (as tuples) used to compute the throttling parameter.
"""
raw_code = get_throttling_function_code(js)
transform_start = r"try{"
plan_regex = re.compile(transform_start)
match = plan_regex.search(raw_code)
transform_plan_raw = find_object_from_startpoint(raw_code, match.span()[1] - 1)
# Steps are either c[x](c[y]) or c[x](c[y],c[z])
step_start = r"c\[(\d+)\]\(c\[(\d+)\](,c(\[(\d+)\]))?\)"
step_regex = re.compile(step_start)
matches = step_regex.findall(transform_plan_raw)
transform_steps = []
for match in matches:
if match[4] != '':
            transform_steps.append((match[0], match[1], match[4]))
else:
            transform_steps.append((match[0], match[1]))
return transform_steps
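# For example (illustrative), a raw step "c[4](c[10],c[2])" in the throttling
# function body becomes the tuple ('4', '10', '2'), while "c[4](c[10])" becomes
# the tuple ('4', '10').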
def reverse(arr: List, _: Optional[Any]):
"""Reverse elements in a list.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { a.reverse() }
    This function takes an unused ``b`` argument because the transform functions
    universally send two arguments.
**Example**:
    >>> reverse([1, 2, 3, 4], None)
[4, 3, 2, 1]
"""
return arr[::-1]
def splice(arr: List, b: int):
"""Add/remove items to/from a list.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { a.splice(0, b) }
**Example**:
>>> splice([1, 2, 3, 4], 2)
    [3, 4]
"""
return arr[b:]
def swap(arr: List, b: int):
"""Swap positions at b modulus the list length.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { var c=a[0];a[0]=a[b%a.length];a[b]=c }
**Example**:
>>> swap([1, 2, 3, 4], 2)
[3, 2, 1, 4]
"""
r = b % len(arr)
return list(chain([arr[r]], arr[1:r], [arr[0]], arr[r + 1 :]))
def throttling_reverse(arr: list):
"""Reverses the input list.
Needs to do an in-place reversal so that the passed list gets changed.
To accomplish this, we create a reversed copy, and then change each
    individual element.
"""
reverse_copy = arr.copy()[::-1]
for i in range(len(reverse_copy)):
arr[i] = reverse_copy[i]
def throttling_push(d: list, e: Any):
"""Pushes an element onto a list."""
d.append(e)
def throttling_mod_func(d: list, e: int):
"""Perform the modular function from the throttling array functions.
In the javascript, the modular operation is as follows:
e = (e % d.length + d.length) % d.length
We simply translate this to python here.
"""
return (e % len(d) + len(d)) % len(d)
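# For example, throttling_mod_func(list(range(5)), -7) returns 3, matching the
# JavaScript result for a negative index.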
def throttling_unshift(d: list, e: int):
"""Rotates the elements of the list to the right.
In the javascript, the operation is as follows:
for(e=(e%d.length+d.length)%d.length;e--;)d.unshift(d.pop())
"""
e = throttling_mod_func(d, e)
new_arr = d[-e:] + d[:-e]
d.clear()
for el in new_arr:
d.append(el)
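# For example (illustrative), given d = [1, 2, 3, 4, 5], throttling_unshift(d, 2)
# mutates d in place to [4, 5, 1, 2, 3].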
def throttling_cipher_function(d: list, e: str):
"""This ciphers d with e to generate a new list.
In the javascript, the operation is as follows:
var h = [A-Za-z0-9-_], f = 96; // simplified from switch-case loop
d.forEach(
function(l,m,n){
this.push(
n[m]=h[
(h.indexOf(l)-h.indexOf(this[m])+m-32+f--)%h.length
]
)
},
e.split("")
)
"""
h = list('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_')
f = 96
# by naming it "this" we can more closely reflect the js
this = list(e)
# This is so we don't run into weirdness with enumerate while
# we change the input list
copied_list = d.copy()
for m, l in enumerate(copied_list):
bracket_val = (h.index(l) - h.index(this[m]) + m - 32 + f) % len(h)
this.append(
h[bracket_val]
)
d[m] = h[bracket_val]
f -= 1
def throttling_nested_splice(d: list, e: int):
"""Nested splice function in throttling js.
In the javascript, the operation is as follows:
function(d,e){
e=(e%d.length+d.length)%d.length;
d.splice(
0,
1,
d.splice(
e,
1,
d[0]
)[0]
)
}
    While testing, all this seemed to do was swap elements 0 and e,
    but the actual process is preserved in case there is an edge
    case that was not considered.
"""
e = throttling_mod_func(d, e)
inner_splice = js_splice(
d,
e,
1,
d[0]
)
js_splice(
d,
0,
1,
inner_splice[0]
)
def throttling_prepend(d: list, e: int):
"""
In the javascript, the operation is as follows:
function(d,e){
e=(e%d.length+d.length)%d.length;
d.splice(-e).reverse().forEach(
function(f){
d.unshift(f)
}
)
}
Effectively, this moves the last e elements of d to the beginning.
"""
start_len = len(d)
# First, calculate e
e = throttling_mod_func(d, e)
# Then do the prepending
new_arr = d[-e:] + d[:-e]
# And update the input list
d.clear()
for el in new_arr:
d.append(el)
end_len = len(d)
assert start_len == end_len
def throttling_swap(d: list, e: int):
"""Swap positions of the 0'th and e'th elements in-place."""
e = throttling_mod_func(d, e)
f = d[0]
d[0] = d[e]
d[e] = f
def js_splice(arr: list, start: int, delete_count=None, *items):
"""Implementation of javascript's splice function.
:param list arr:
Array to splice
:param int start:
Index at which to start changing the array
:param int delete_count:
Number of elements to delete from the array
:param *items:
Items to add to the array
Reference: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/splice # noqa:E501
"""
# Special conditions for start value
try:
if start > len(arr):
start = len(arr)
        # If start is negative, count backwards from end (clamped at 0, as in js)
        if start < 0:
            start = max(len(arr) + start, 0)
except TypeError:
# Non-integer start values are treated as 0 in js
start = 0
# Special condition when delete_count is greater than remaining elements
if not delete_count or delete_count >= len(arr) - start:
delete_count = len(arr) - start # noqa: N806
deleted_elements = arr[start:start + delete_count]
# Splice appropriately.
new_arr = arr[:start] + list(items) + arr[start + delete_count:]
# Replace contents of input array
arr.clear()
for el in new_arr:
arr.append(el)
return deleted_elements
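# A small illustrative example of the JavaScript splice parity:
#
#     >>> a = [1, 2, 3, 4, 5]
#     >>> js_splice(a, 1, 2, "x")  # delete 2 elements at index 1, insert "x"
#     [2, 3]
#     >>> a
#     [1, 'x', 4, 5]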
def map_functions(js_func: str) -> Callable:
"""For a given JavaScript transform function, return the Python equivalent.
:param str js_func:
The JavaScript version of the transform function.
"""
mapper = (
# function(a){a.reverse()}
(r"{\w\.reverse\(\)}", reverse),
# function(a,b){a.splice(0,b)}
(r"{\w\.splice\(0,\w\)}", splice),
# function(a,b){var c=a[0];a[0]=a[b%a.length];a[b]=c}
(r"{var\s\w=\w\[0\];\w\[0\]=\w\[\w\%\w.length\];\w\[\w\]=\w}", swap),
# function(a,b){var c=a[0];a[0]=a[b%a.length];a[b%a.length]=c}
(
r"{var\s\w=\w\[0\];\w\[0\]=\w\[\w\%\w.length\];\w\[\w\%\w.length\]=\w}",
swap,
),
)
for pattern, fn in mapper:
if re.search(pattern, js_func):
return fn
raise RegexMatchError(caller="map_functions", pattern="multiple")
|
unlicense
|
9409df7792d2d6d6536ad2fa31022a4e
| 31.322812
| 154
| 0.566337
| 3.357526
| false
| false
| false
| false
|
pytube/pytube
|
pytube/request.py
|
1
|
8512
|
"""Implements a simple wrapper around urlopen."""
import http.client
import json
import logging
import re
import socket
from functools import lru_cache
from urllib import parse
from urllib.error import URLError
from urllib.request import Request, urlopen
from pytube.exceptions import RegexMatchError, MaxRetriesExceeded
from pytube.helpers import regex_search
logger = logging.getLogger(__name__)
default_range_size = 9437184 # 9MB
def _execute_request(
url,
method=None,
headers=None,
data=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT
):
base_headers = {"User-Agent": "Mozilla/5.0", "accept-language": "en-US,en"}
if headers:
base_headers.update(headers)
if data:
# encode data for request
if not isinstance(data, bytes):
data = bytes(json.dumps(data), encoding="utf-8")
if url.lower().startswith("http"):
request = Request(url, headers=base_headers, method=method, data=data)
else:
raise ValueError("Invalid URL")
return urlopen(request, timeout=timeout) # nosec
def get(url, extra_headers=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Send an http GET request.
:param str url:
The URL to perform the GET request for.
:param dict extra_headers:
Extra headers to add to the request
:rtype: str
:returns:
UTF-8 encoded string of response
"""
if extra_headers is None:
extra_headers = {}
response = _execute_request(url, headers=extra_headers, timeout=timeout)
return response.read().decode("utf-8")
def post(url, extra_headers=None, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Send an http POST request.
:param str url:
The URL to perform the POST request for.
:param dict extra_headers:
Extra headers to add to the request
:param dict data:
The data to send on the POST request
:rtype: str
:returns:
UTF-8 encoded string of response
"""
    # This could technically be implemented in terms of get(), but it is kept
    # separate to avoid confusion.
if extra_headers is None:
extra_headers = {}
if data is None:
data = {}
# required because the youtube servers are strict on content type
# raises HTTPError [400]: Bad Request otherwise
extra_headers.update({"Content-Type": "application/json"})
response = _execute_request(
url,
headers=extra_headers,
data=data,
timeout=timeout
)
return response.read().decode("utf-8")
def seq_stream(
url,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
max_retries=0
):
"""Read the response in sequence.
:param str url: The URL to perform the GET request for.
:rtype: Iterable[bytes]
"""
# YouTube expects a request sequence number as part of the parameters.
split_url = parse.urlsplit(url)
base_url = '%s://%s/%s?' % (split_url.scheme, split_url.netloc, split_url.path)
querys = dict(parse.parse_qsl(split_url.query))
# The 0th sequential request provides the file headers, which tell us
# information about how the file is segmented.
querys['sq'] = 0
url = base_url + parse.urlencode(querys)
segment_data = b''
for chunk in stream(url, timeout=timeout, max_retries=max_retries):
yield chunk
segment_data += chunk
# We can then parse the header to find the number of segments
stream_info = segment_data.split(b'\r\n')
segment_count_pattern = re.compile(b'Segment-Count: (\\d+)')
for line in stream_info:
match = segment_count_pattern.search(line)
if match:
segment_count = int(match.group(1).decode('utf-8'))
# We request these segments sequentially to build the file.
seq_num = 1
while seq_num <= segment_count:
# Create sequential request URL
querys['sq'] = seq_num
url = base_url + parse.urlencode(querys)
yield from stream(url, timeout=timeout, max_retries=max_retries)
seq_num += 1
return # pylint: disable=R1711
def stream(
url,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
max_retries=0
):
"""Read the response in chunks.
:param str url: The URL to perform the GET request for.
:rtype: Iterable[bytes]
"""
file_size: int = default_range_size # fake filesize to start
downloaded = 0
while downloaded < file_size:
stop_pos = min(downloaded + default_range_size, file_size) - 1
range_header = f"bytes={downloaded}-{stop_pos}"
tries = 0
# Attempt to make the request multiple times as necessary.
while True:
# If the max retries is exceeded, raise an exception
if tries >= 1 + max_retries:
raise MaxRetriesExceeded()
# Try to execute the request, ignoring socket timeouts
try:
response = _execute_request(
url,
method="GET",
headers={"Range": range_header},
timeout=timeout
)
except URLError as e:
# We only want to skip over timeout errors, and
# raise any other URLError exceptions
if isinstance(e.reason, socket.timeout):
pass
else:
raise
except http.client.IncompleteRead:
# Allow retries on IncompleteRead errors for unreliable connections
pass
else:
# On a successful request, break from loop
break
tries += 1
if file_size == default_range_size:
try:
content_range = response.info()["Content-Range"]
file_size = int(content_range.split("/")[1])
except (KeyError, IndexError, ValueError) as e:
logger.error(e)
while True:
chunk = response.read()
if not chunk:
break
downloaded += len(chunk)
yield chunk
return # pylint: disable=R1711
@lru_cache()
def filesize(url):
"""Fetch size in bytes of file at given URL
:param str url: The URL to get the size of
:returns: int: size in bytes of remote file
"""
return int(head(url)["content-length"])
@lru_cache()
def seq_filesize(url):
"""Fetch size in bytes of file at given URL from sequential requests
:param str url: The URL to get the size of
:returns: int: size in bytes of remote file
"""
total_filesize = 0
# YouTube expects a request sequence number as part of the parameters.
split_url = parse.urlsplit(url)
base_url = '%s://%s/%s?' % (split_url.scheme, split_url.netloc, split_url.path)
querys = dict(parse.parse_qsl(split_url.query))
# The 0th sequential request provides the file headers, which tell us
# information about how the file is segmented.
querys['sq'] = 0
url = base_url + parse.urlencode(querys)
response = _execute_request(
url, method="GET"
)
response_value = response.read()
# The file header must be added to the total filesize
total_filesize += len(response_value)
# We can then parse the header to find the number of segments
segment_count = 0
stream_info = response_value.split(b'\r\n')
segment_regex = b'Segment-Count: (\\d+)'
for line in stream_info:
# One of the lines should contain the segment count, but we don't know
# which, so we need to iterate through the lines to find it
try:
segment_count = int(regex_search(segment_regex, line, 1))
except RegexMatchError:
pass
if segment_count == 0:
raise RegexMatchError('seq_filesize', segment_regex)
# We make HEAD requests to the segments sequentially to find the total filesize.
seq_num = 1
while seq_num <= segment_count:
# Create sequential request URL
querys['sq'] = seq_num
url = base_url + parse.urlencode(querys)
total_filesize += int(head(url)['content-length'])
seq_num += 1
return total_filesize
def head(url):
"""Fetch headers returned http GET request.
:param str url:
The URL to perform the GET request for.
:rtype: dict
:returns:
dictionary of lowercase headers
"""
response_headers = _execute_request(url, method="HEAD").info()
return {k.lower(): v for k, v in response_headers.items()}
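# A minimal usage sketch for this module (the URL and filename are illustrative):
#
#     size = filesize("https://example.com/video.mp4")
#     with open("video.mp4", "wb") as fh:
#         for chunk in stream("https://example.com/video.mp4", max_retries=2):
#             fh.write(chunk)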
|
unlicense
|
7717abb37be2a302ac4afc9880d28652
| 31.120755
| 85
| 0.620301
| 4.112077
| false
| false
| false
| false
|
mozilla-iam/cis
|
python-modules/cis_crypto/cis_crypto/operation.py
|
1
|
5491
|
import json
import logging
import os
import yaml
from jose import jwk
from jose import jws
from jose.exceptions import JWSError
from cis_crypto import secret
from cis_crypto import common
logger = logging.getLogger(__name__)
# Note:
# These attrs on sign/verify could be refactored to use object inheritance. Leaving as is for now for readability.
class Sign(object):
def __init__(self):
self.config = common.get_config()
self.key_name = self.config("signing_key_name", namespace="cis", default="file")
self._jwk = None
self.secret_manager = self.config("secret_manager", namespace="cis", default="file")
self.payload = None
def load(self, data):
"""Loads a payload to the object and ensures that the thing is serializable."""
try:
data = yaml.safe_load(data)
except yaml.scanner.ScannerError:
logger.debug("This file is likely not YAML. Attempting JSON load.")
except AttributeError:
logger.debug("This file is likely not YAML. Attempting JSON load.")
if isinstance(data, str):
data = json.loads(data)
else:
pass
self.payload = data
return self.payload
def jws(self, keyname=None):
"""Assumes you loaded a payload. Returns a jws."""
# Override key name
if keyname is not None:
self.key_name = keyname
key_jwk = self._get_key()
sig = jws.sign(self.payload, key_jwk.to_dict(), algorithm="RS256")
return sig
def _get_key(self):
if self._jwk is None:
manager = secret.Manager(provider_type=self.secret_manager)
self._jwk = manager.get_key(key_name=self.key_name)
return self._jwk
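# A minimal usage sketch (the payload and key name are illustrative; the key must
# already exist in the configured secret manager):
#
#     signer = Sign()
#     signer.load('{"hello": "world"}')
#     signature = signer.jws(keyname="file")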
class Verify(object):
def __init__(self):
self.config = common.get_config()
# Provide file or URL as opts.
self.well_known_mode = self.config("well_known_mode", namespace="cis", default="file")
self.public_key_name = None # Optional for use with file based well known mode
self.jws_signature = None
self.well_known = None # Well known JSON data
def load(self, jws_signature):
"""Takes data in the form of a dict() and a JWS sig."""
# Store the original form in the jws_signature attribute
self.jws_signature = jws_signature
def _get_public_key(self, keyname=None):
"""Returns a jwk construct for the public key and mode specified."""
if self.well_known_mode == "file":
key_dir = self.config(
"secret_manager_file_path",
namespace="cis",
default=("{}/.mozilla-iam/keys/".format(os.path.expanduser("~"))),
)
key_name = self.config("public_key_name", namespace="cis", default="access-file-key")
file_name = "{}".format(key_name)
            with open(os.path.join(key_dir, file_name), "rb") as fh:
                key_content = fh.read()
key_construct = jwk.construct(key_content, "RS256")
return [key_construct.to_dict()]
elif self.well_known_mode == "http" or self.well_known_mode == "https":
logger.debug("Well known mode engaged. Reducing key structure.", extra={"well_known": self.well_known})
return self._reduce_keys(keyname)
def _reduce_keys(self, keyname):
access_file_keys = self.well_known["access_file"]["jwks"]["keys"]
publishers_supported = self.well_known["api"]["publishers_jwks"]
keys = []
if "access-file-key" in self.config("public_key_name", namespace="cis"):
logger.debug("This is an access file verification.")
return access_file_keys
else:
# If not an access key verification this will attempt to verify against any listed publisher.
keys = publishers_supported[keyname]["keys"]
logger.debug("Publisher based verification, will use {} public keys for verification.".format(keys))
return keys
def jws(self, keyname=None):
"""Assumes you loaded a payload. Return the same jws or raise a custom exception."""
key_material = self._get_public_key(keyname)
logger.debug(
"The key material for the payload was loaded for: {}".format(keyname), extra={"key_material": key_material}
)
if isinstance(key_material, list):
logger.debug("Multiple keys returned. Attempting match.")
for key in key_material:
try:
key.pop("x5t", None)
key.pop("x5c", None)
except AttributeError:
logger.warn("x5t and x5c attrs do not exist in key material.")
logger.debug("Attempting to match against: {}".format(key))
try:
sig = jws.verify(self.jws_signature, key, algorithms="RS256", verify=True)
logger.debug(
"Matched a verified signature for: {}".format(key), extra={"signature": self.jws_signature}
)
return sig
except JWSError as e:
logger.error(
"The signature was not valid for the payload.", extra={"signature": self.jws_signature}
)
logger.error(e)
raise JWSError("The signature could not be verified for any trusted key", key_material)
|
mpl-2.0
|
41f5b14b1044c337dd5dde3addfa3cd4
| 40.285714
| 119
| 0.590056
| 4.082528
| false
| true
| false
| false
|
mozilla-iam/cis
|
python-modules/cis_logger/cis_logger/__init__.py
|
1
|
1579
|
import logging.handlers
from pythonjsonlogger import jsonlogger
import datetime
class JsonFormatter(jsonlogger.JsonFormatter, object):
def __init__(
self,
fmt="%(asctime) %(name) %(processName) %(filename) \
%(funcName) %(levelname) %(lineno) %(module) %(threadName) %(message)",
datefmt="%Y-%m-%dT%H:%M:%SZ%z",
style="%",
extra={},
*args,
**kwargs
):
self._extra = extra
jsonlogger.JsonFormatter.__init__(self, fmt=fmt, datefmt=datefmt, *args, **kwargs)
def process_log_record(self, log_record):
if "asctime" in log_record:
log_record["timestamp"] = log_record["asctime"]
else:
log_record["timestamp"] = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ%z")
if self._extra is not None:
for key, value in self._extra.items():
log_record[key] = value
return super(JsonFormatter, self).process_log_record(log_record)
class SysLogJsonHandler(logging.handlers.SysLogHandler, object):
def __init__(
self,
address=("localhost", logging.handlers.SYSLOG_UDP_PORT),
facility=logging.handlers.SysLogHandler.LOG_USER,
socktype=None,
prefix="",
):
super(SysLogJsonHandler, self).__init__(address, facility, socktype)
self._prefix = prefix
if self._prefix != "":
self._prefix = prefix + ": "
def format(self, record):
return self._prefix + super(SysLogJsonHandler, self).format(record)
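# A minimal usage sketch (the address, prefix, and extra fields are illustrative):
#
#     import logging
#     logger = logging.getLogger("cis")
#     handler = SysLogJsonHandler(address=("localhost", 514), prefix="cis")
#     handler.setFormatter(JsonFormatter(extra={"service": "example"}))
#     logger.addHandler(handler)
#     logger.info("hello")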
|
mpl-2.0
|
d294b79b643857c42fe48606e33c9387
| 33.326087
| 118
| 0.59658
| 3.879607
| false
| false
| false
| false
|
ibm-watson-iot/iot-python
|
src/wiotp/sdk/api/registry/devices.py
|
2
|
15894
|
# *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import iso8601
from datetime import datetime
import json
from collections import defaultdict
from wiotp.sdk.api.common import IterableList
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.registry.diag import DeviceLogs, DeviceErrorCodes
class LogEntry(defaultdict):
def __init__(self, **kwargs):
if not set(["message", "timestamp"]).issubset(kwargs):
raise Exception("message and timestamp are required properties for a LogEntry")
kwargs["timestamp"] = iso8601.parse_date(kwargs["timestamp"])
dict.__init__(self, **kwargs)
@property
def message(self):
return self["message"]
@property
def timestamp(self):
return self["timestamp"]
class DeviceUid(defaultdict):
def __init__(self, **kwargs):
if not set(["deviceId", "typeId"]).issubset(kwargs):
raise Exception("typeId and deviceId are required properties to uniquely identify a device")
dict.__init__(self, **kwargs)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
def __str__(self):
return self["typeId"] + ":" + self["deviceId"]
def __repr__(self):
return json.dumps(self, sort_keys=True, indent=2)
class DeviceCreateRequest(defaultdict):
def __init__(self, typeId, deviceId, authToken=None, deviceInfo=None, location=None, metadata=None):
dict.__init__(
self,
typeId=typeId,
deviceId=deviceId,
authToken=authToken,
deviceInfo=deviceInfo,
location=location,
metadata=metadata,
)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def authToken(self):
return self["authToken"]
@property
def deviceInfo(self):
return DeviceInfo(**self["deviceInfo"])
@property
def location(self):
return self["location"]
@property
def metadata(self):
return self["metadata"]
class DeviceLocation(defaultdict):
def __init__(self, **kwargs):
if not set(["latitude", "longitude"]).issubset(kwargs):
raise Exception("Data passed to Device is not correct: %s" % (json.dumps(kwargs, sort_keys=True)))
if "measuredDateTime" in kwargs and not isinstance(kwargs["measuredDateTime"], datetime):
kwargs["measuredDateTime"] = iso8601.parse_date(kwargs["measuredDateTime"])
if "updatedDateTime" in kwargs and not isinstance(kwargs["updatedDateTime"], datetime):
kwargs["updatedDateTime"] = iso8601.parse_date(kwargs["updatedDateTime"])
dict.__init__(self, **kwargs)
@property
def latitude(self):
return self["latitude"]
@property
def longitude(self):
return self["longitude"]
@property
def measuredDateTime(self):
return self.get("measuredDateTime", None)
@property
def updatedDateTime(self):
return self.get("updatedDateTime", None)
class DeviceCreateResponse(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def success(self):
return self.get("success", None)
@property
def authToken(self):
return self["authToken"]
class DeviceInfo(defaultdict):
def __init__(
self,
description=None,
deviceClass=None,
fwVersion=None,
hwVersion=None,
manufacturer=None,
model=None,
serialNumber=None,
descriptiveLocation=None,
):
dict.__init__(
self,
description=description,
deviceClass=deviceClass,
fwVersion=fwVersion,
hwVersion=hwVersion,
manufacturer=manufacturer,
model=model,
serialNumber=serialNumber,
descriptiveLocation=descriptiveLocation,
)
@property
def description(self):
return self["description"]
@property
def deviceClass(self):
return self["deviceClass"]
@property
def fwVersion(self):
return self["fwVersion"]
@property
def hwVersion(self):
return self["hwVersion"]
@property
def manufacturer(self):
return self["manufacturer"]
@property
def model(self):
return self["model"]
@property
def serialNumber(self):
return self["serialNumber"]
@property
def descriptiveLocation(self):
return self["descriptiveLocation"]
class Device(defaultdict):
def __init__(self, apiClient, **kwargs):
self._apiClient = apiClient
if not set(["clientId", "deviceId", "typeId"]).issubset(kwargs):
raise Exception("Data passed to Device is not correct: %s" % (json.dumps(kwargs, sort_keys=True)))
self.diagLogs = DeviceLogs(self._apiClient, kwargs["typeId"], kwargs["deviceId"])
self.diagErrorCodes = DeviceErrorCodes(self._apiClient, kwargs["typeId"], kwargs["deviceId"])
dict.__init__(self, **kwargs)
# {u'clientId': u'xxxxxxxxx',
# u'deviceId': u'xxxxxxx',
# u'deviceInfo': {u'description': u'None (xxxxxxxx)',
# u'deviceClass': u'None',
# u'fwVersion': u'xxxxx',
# u'hwVersion': u'xxxxx',
# u'manufacturer': u'xxxx.',
# u'model': u'xxxx',
# u'serialNumber': u'xxxxxxxxx'},
# u'metadata': {},
# u'refs': {u'diag': {u'errorCodes': u'/api/v0002/device/types/xxx/devices/xxxx/diag/errorCodes',
# u'logs': u'/api/v0002/device/types/xxx/devices/xxxx/diag/logs'},
# u'location': u'/api/v0002/device/types/xxxx/devices/xxxx/location',
# u'mgmt': u'/api/v0002/device/types/xx/devices/xxxx/mgmt'},
# u'registration': {u'auth': {u'id': u'xxxxxx',
# u'type': u'person'},
# u'date': u'2015-09-18T06:44:02.000Z'},
# u'status': {u'alert': {u'enabled': False,
# u'timestamp': u'2016-01-21T02:25:55.543Z'}},
# u'typeId': u'vm'}
@property
def clientId(self):
return self["clientId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def authToken(self):
if "authToken" in self:
return self["authToken"]
else:
return None
@property
def metadata(self):
if "metadata" in self:
return self["metadata"]
else:
return None
@property
def total_rows(self):
return self["total_rows"]
@property
def deviceInfo(self):
# Unpack the deviceInfo dictionary into keyword arguments so that we
# can return a DeviceInfo object instead of a plain dictionary
return DeviceInfo(**self["deviceInfo"])
@property
def typeId(self):
return self["typeId"]
def __str__(self):
return "[%s] %s" % (self.clientId, self.deviceInfo.description or "<No description>")
def __repr__(self):
return json.dumps(self, sort_keys=True, indent=2)
def json(self):
return dict(self)
# Extended properties
def getMgmt(self):
r = self._apiClient.get("api/v0002/device/types/%s/devices/%s/mgmt" % (self.typeId, self.deviceId))
if r.status_code == 200:
return r.json()
if r.status_code == 404:
            # It's perfectly valid for a device to have no management information set; in this case, return None
return None
else:
raise ApiException(r)
def getLocation(self):
r = self._apiClient.get("api/v0002/device/types/%s/devices/%s/location" % (self.typeId, self.deviceId))
if r.status_code == 200:
return DeviceLocation(**r.json())
if r.status_code == 404:
            # It's perfectly valid for a device to not have a location set; in this case, return None
return None
else:
raise ApiException(r)
def setLocation(self, value):
r = self._apiClient.put("api/v0002/device/types/%s/devices/%s/location" % (self.typeId, self.deviceId), value)
if r.status_code == 200:
return DeviceLocation(**r.json())
else:
raise ApiException(r)
def getConnectionLogs(self):
r = self._apiClient.get(
"api/v0002/logs/connection", parameters={"typeId": self.typeId, "deviceId": self.deviceId}
)
if r.status_code == 200:
responseList = []
for entry in r.json():
responseList.append(LogEntry(**entry))
return responseList
else:
raise ApiException(r)
class IterableDeviceList(IterableList):
def __init__(self, apiClient, typeId=None):
if typeId is None:
super(IterableDeviceList, self).__init__(apiClient, Device, "api/v0002/bulk/devices", "typeId,deviceId")
else:
super(IterableDeviceList, self).__init__(
apiClient, Device, "api/v0002/device/types/%s/devices/" % (typeId), "deviceId"
)
class Devices(defaultdict):
"""
    Use the globally unique identifier of a device, its `clientId`, to address devices.
# Delete
```python
del devices["d:orgId:typeId:deviceId"]
```
# Get
    Use the globally unique identifier of a device, its `clientId`.
```python
device = devices["d:orgId:typeId:deviceId"]
print(device.clientId)
    print(device)
    ```
    # Is a device registered?
```python
if "d:orgId:typeId:deviceId" in devices:
print("The device exists")
```
# Iterate through all registered devices
```python
for device in devices:
print(device)
```
"""
# https://docs.python.org/2/library/collections.html#defaultdict-objects
def __init__(self, apiClient, typeId=None):
self._apiClient = apiClient
self.typeId = typeId
def __contains__(self, key):
"""
Does a device exist?
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.get(deviceUrl)
if r.status_code == 200:
return True
elif r.status_code == 404:
return False
else:
raise ApiException(r)
def __getitem__(self, key):
"""
Get a device from the registry
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.get(deviceUrl)
if r.status_code == 200:
return Device(apiClient=self._apiClient, **r.json())
elif r.status_code == 404:
self.__missing__(key)
else:
raise ApiException(r)
def __setitem__(self, key, value):
"""
Register a new device - not currently supported via this interface, use: `registry.devices.create()`
"""
raise Exception("Unable to register or update a device via this interface at the moment.")
def __delitem__(self, key):
"""
Delete a device
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.delete(deviceUrl)
if r.status_code == 404:
self.__missing__(key)
elif r.status_code != 204:
raise ApiException(r)
def __missing__(self, key):
"""
Device does not exist
"""
raise KeyError("Device %s does not exist" % (key))
def __iter__(self, *args, **kwargs):
"""
Iterate through all devices
"""
return IterableDeviceList(self._apiClient, self.typeId)
@property
def total_rows(self):
"""
Returns total devices
"""
return self["total_rows"]
def create(self, devices):
"""
Register one or more new devices, each request can contain a maximum of 512KB.
The response body will contain the generated authentication tokens for all devices.
You must make sure to record these tokens when processing the response.
        We are not able to retrieve lost authentication tokens.
        It accepts a list of devices (List of Dictionary of Devices), or a single device.
If you provide a list as the parameter it will return a list in response
If you provide a singular device it will return a singular response
"""
if not isinstance(devices, list):
listOfDevices = [devices]
returnAsAList = False
else:
listOfDevices = devices
returnAsAList = True
r = self._apiClient.post("api/v0002/bulk/devices/add", listOfDevices)
if r.status_code in [201, 202]:
if returnAsAList:
responseList = []
for entry in r.json():
responseList.append(DeviceCreateResponse(**entry))
return responseList
else:
return DeviceCreateResponse(**r.json()[0])
else:
raise ApiException(r)
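    # A minimal usage sketch (identifiers are illustrative; `registry` is assumed
    # to be an already-constructed client exposing this Devices collection):
    #
    #     response = registry.devices.create({"typeId": "vm", "deviceId": "device001"})
    #     print(response.authToken)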
def update(self, deviceUid, metadata=None, deviceInfo=None, status=None):
"""
Update an existing device
"""
if not isinstance(deviceUid, DeviceUid) and isinstance(deviceUid, dict):
deviceUid = DeviceUid(**deviceUid)
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (deviceUid.typeId, deviceUid.deviceId)
data = {"status": status, "deviceInfo": deviceInfo, "metadata": metadata}
r = self._apiClient.put(deviceUrl, data)
if r.status_code == 200:
return Device(apiClient=self._apiClient, **r.json())
else:
raise ApiException(r)
def delete(self, devices):
"""
        Delete one or more devices, each request can contain a maximum of 512KB.
        It accepts a list of devices (List of Dictionary of Devices).
In case of failure it throws APIException
"""
if not isinstance(devices, list):
listOfDevices = [devices]
else:
listOfDevices = devices
r = self._apiClient.post("api/v0002/bulk/devices/remove", listOfDevices)
if r.status_code in [200, 202]:
return r.json()
else:
raise ApiException(r)
|
epl-1.0
|
2ee6e0ceefe91a835b0e38b3a5447d3d
| 29.624277
| 118
| 0.577702
| 4.125097
| false
| false
| false
| false
|
ibm-watson-iot/iot-python
|
src/wiotp/sdk/api/dsc/destinations.py
|
2
|
4384
|
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from collections import defaultdict
import iso8601
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.common import IterableList, RestApiDict
# See docs @ https://orgid.internetofthings.ibmcloud.com/docs/v0002/historian-connector.html
class Destination(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def name(self):
        # Unlike most other resources, name == the UUID; there is no separate id property
return self["name"]
@property
def destinationType(self):
return self["type"]
@property
def configuration(self):
return self["configuration"]
# EventStreams only configuration
@property
def partitions(self):
if self["type"] == "eventstreams":
return self["configuration"]["partitions"]
else:
return None
# Cloudant only configuration
@property
def bucketInterval(self):
if self["type"] == "cloudant":
return self["configuration"]["bucketInterval"]
else:
return None
# Cloudant only configuration
@property
def retentionDays(self):
# this is an optional parameter so check if it exists
if "configuration" in self and "retentionDays" in self["configuration"]:
return self["configuration"]["retentionDays"]
else:
return None
# DB2/Postgres only configuration
@property
def columns(self):
# this is an optional parameter so check if it exists
if "configuration" in self and "columns" in self["configuration"]:
return self["configuration"]["columns"]
else:
return None
class IterableDestinationList(IterableList):
def __init__(self, apiClient, url, filters=None):
# This API does not support sorting
super(IterableDestinationList, self).__init__(
apiClient, Destination, url, sort=None, filters=filters, passApiClient=False
)
class Destinations(RestApiDict):
def __init__(self, apiClient, connectorId, connectorType):
super(Destinations, self).__init__(
apiClient,
Destination,
IterableDestinationList,
"api/v0002/historianconnectors/%s/destinations" % connectorId,
)
self.connectorId = connectorId
self.connectorType = connectorType
self.allDestinationsUrl = "api/v0002/historianconnectors/%s/destinations" % connectorId
def find(self, nameFilter=None):
queryParms = {}
if nameFilter:
queryParms["name"] = nameFilter
return IterableDestinationList(self._apiClient, self.allDestinationsUrl, filters=queryParms)
def create(self, name, **kwargs):
if self.connectorType == "cloudant":
if "bucketInterval" not in kwargs.keys():
raise Exception("You must specify bucketInterval parameter on create for a Cloudant destination")
if self.connectorType == "eventstreams":
if "partitions" not in kwargs.keys():
raise Exception("You must specify partitions parameter on create for an EventStreams destination")
if self.connectorType == "db2" or self.connectorType == "postgres":
if "columns" not in kwargs.keys():
raise Exception("You must specify a columns parameter on create for a DB2 or Postgres destination")
destination = {"name": name, "type": self.connectorType, "configuration": kwargs}
r = self._apiClient.post(self.allDestinationsUrl, data=destination)
if r.status_code == 201:
return Destination(**r.json())
else:
raise ApiException(r)
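    # A minimal usage sketch (names are illustrative; bucketInterval is required
    # here because this assumes a cloudant connector):
    #
    #     destination = destinations.create(name="mydb", bucketInterval="DAY")
    #     print(destination.name, destination.bucketInterval)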
def update(self, key, item):
"""
        Update an item - not supported; the API does not allow updating a destination
"""
raise Exception("The API doesn't support updating a destination.")
|
epl-1.0
|
2af7656e3d7285851117f5bc8a1cd1ad
| 35.231405
| 115
| 0.627053
| 4.629356
| false
| true
| false
| false
|
ibm-watson-iot/iot-python
|
src/wiotp/sdk/api/state/state.py
|
2
|
2752
|
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from collections import defaultdict
import iso8601
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.common import IterableList
from wiotp.sdk.api.common import RestApiDict
from wiotp.sdk.api.common import RestApiItemBase
from wiotp.sdk.api.common import RestApiDictReadOnly
# See docs @ https://orgid.internetofthings.ibmcloud.com/docs/v0002-beta/State-mgr-beta.html
class State(defaultdict):
def __init__(self, apiClient, url, **kwargs):
self._apiClient = apiClient
self._url = url
dict.__init__(self, **kwargs)
@property
def state(self):
return self["state"]
@property
def timestamp(self):
return iso8601.parse_date(self["timestamp"])
@property
def updated(self):
return iso8601.parse_date(self["updated"])
def __callPatchOperation__(self, body):
r = self._apiClient.patch(self._url, body)
if r.status_code == 200:
return r.json()
else:
raise Exception("Unexpected response from API (%s) = %s %s" % (self._url, r.status_code, r.text))
def reset(self):
return self.__callPatchOperation__({"operation": "reset-state"})
class States(RestApiDictReadOnly):
def __init__(self, apiClient, typeId, instanceId):
url = "api/v0002/device/types/%s/devices/%s/state" % (typeId, instanceId)
super(States, self).__init__(apiClient, State, None, url)
    # TBD: this method overrides the base class method to pass the state URL to the constructed state;
    # without this, we can't invoke the reset-state API call.
def __getitem__(self, key):
url = self._singleItemUrl % (key)
r = self._apiClient.get(url)
if r.status_code == 200:
return self._castToClass(apiClient=self._apiClient, url=url, **r.json())
if r.status_code == 404:
self.__missing__(key)
else:
raise ApiException(r)
    # Override the standard iterator, as there is no API to get all states by iterating over LIs
def __iter__(self, *args, **kwargs):
raise Exception("Unable to iterate through device state. Retrieve it for a specific LI.")
def find(self, query_params={}):
raise Exception("Unable to find device state. Retrieve it for a specific LI.")
|
epl-1.0
|
58605d9eeb71bac265a21eb036021baf
| 36.69863
| 109
| 0.628634
| 3.97114
| false
| false
| false
| false
|
ibm-watson-iot/iot-python
|
src/wiotp/sdk/api/usage/__init__.py
|
2
|
2241
|
# *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from datetime import datetime
from collections import defaultdict
from wiotp.sdk.exceptions import ApiException
class DataTransferSummary(defaultdict):
def __init__(self, **kwargs):
daysAsObj = []
if "days" in kwargs and kwargs["days"] is not None:
for day in kwargs["days"]:
daysAsObj.append(DayDataTransfer(**day))
del kwargs["days"]
dict.__init__(self, days=daysAsObj, **kwargs)
@property
def start(self):
return datetime.strptime(self["start"], "%Y-%m-%d").date()
@property
def end(self):
return datetime.strptime(self["end"], "%Y-%m-%d").date()
@property
def average(self):
return self["average"]
@property
def total(self):
return self["total"]
@property
def days(self):
return self["days"]
class DayDataTransfer(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def date(self):
return datetime.strptime(self["date"], "%Y-%m-%d").date()
@property
def total(self):
return self["total"]
class Usage:
def __init__(self, apiClient):
self._apiClient = apiClient
def dataTransfer(self, start, end, detail=False):
"""
        Retrieve a summary of the organization's data transfer (data traffic)
        between the given start and end dates; set `detail` to True for a
        per-day breakdown. In case of failure it throws ApiException.
"""
r = self._apiClient.get(
"api/v0002/usage/data-traffic?start=%s&end=%s&detail=%s"
% (start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d"), detail)
)
if r.status_code == 200:
return DataTransferSummary(**r.json())
else:
raise ApiException(r)
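# --- Usage sketch (illustrative; not part of the original module) ---
# `apiClient` is assumed to be an authenticated wiotp.sdk API client; the
# date range below is a placeholder.
def _example_data_transfer(apiClient):
    usage = Usage(apiClient)
    summary = usage.dataTransfer(datetime(2020, 1, 1), datetime(2020, 1, 31), detail=True)
    print(summary.start, summary.end, summary.total, summary.average)
    for day in summary.days:  # populated when detail=True
        print(day.date, day.total)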
|
epl-1.0
|
a70bd166a2e5bef336742ee9905bf3e0
| 28.486842
| 113
| 0.57162
| 4.18097
| false
| false
| false
| false
|
ibm-watson-iot/iot-python
|
test/test_device_mgd.py
|
2
|
11230
|
# *****************************************************************************
# Copyright (c) 2016,2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import pytest
import testUtils
import uuid
import os
import wiotp.sdk
import time
from wiotp.sdk.device import ManagedDeviceClient
from wiotp.sdk import Utf8Codec
class TestDeviceMgd(testUtils.AbstractTest):
def testManagedDeviceQSException(self):
with pytest.raises(wiotp.sdk.ConfigurationException) as e:
options = {"identity": {"orgId": "quickstart", "typeId": "xxx", "deviceId": "xxx"}}
wiotp.sdk.device.ManagedDeviceClient(options)
assert "QuickStart does not support device management" == e.value.reason
def testManagedDeviceConnectException(self, device):
badOptions = {
"identity": {"orgId": self.ORG_ID, "typeId": device.typeId, "deviceId": device.deviceId},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
deviceInfoObj = wiotp.sdk.device.DeviceInfo()
managedDevice = wiotp.sdk.device.ManagedDeviceClient(badOptions, deviceInfo=deviceInfoObj)
assert isinstance(managedDevice, wiotp.sdk.device.ManagedDeviceClient)
with pytest.raises(wiotp.sdk.ConnectionException) as e:
managedDevice.connect()
        assert not managedDevice.isConnected()
    def testManagedDeviceConnect(self, device):
        options = {
            "identity": {"orgId": self.ORG_ID, "typeId": device.typeId, "deviceId": device.deviceId},
            "auth": {"token": device.authToken},
        }
        deviceInfoObj = wiotp.sdk.device.DeviceInfo()
        managedDevice = wiotp.sdk.device.ManagedDeviceClient(options, deviceInfo=deviceInfoObj)
        assert isinstance(managedDevice, wiotp.sdk.device.ManagedDeviceClient)
        managedDevice.connect()
        assert managedDevice.isConnected()
        managedDevice.disconnect()
        assert not managedDevice.isConnected()
def testManagedDeviceSetPropertyNameNone(self):
with pytest.raises(Exception) as e:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
managedDeviceClientValue.setProperty(value=1)
assert "Unsupported property name: " in str(e.value)
def testManagedDeviceSetPropertyValue(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
testName = "model"
testValue = 2
test = managedDeviceClientValue.setProperty(name=testName, value=testValue)
assert managedDeviceClientValue._deviceInfo[testName] == testValue
        except Exception:
            pytest.fail("unexpected exception")
    # TODO: cover the rest of setProperty and notifyFieldChange (onSubscribe needs variables).
    # The commented-out test below hangs when run, although it would improve coverage;
    # revisit it later.
# def testManagedDeviceManageOnSubscribe(self):
# try:
# config = {
# "identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
# "auth": {"token": "xxxxxxxxxxxxxxxxxx"},
# }
# managedDeviceClientValue = ManagedDeviceClient(config)
# test = managedDeviceClientValue._onSubscribe(mqttc=1, userdata=2, mid=3, granted_qos=4)
# assert True
# except:
# assert False == True
def testManagedDeviceManageLifetimeValueZero(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.manage(lifetime=3000)
assert True
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceUnManage(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.unmanage()
assert True
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceSetLocationLongitude(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2)
assert managedDeviceClientValue._location["longitude"] == 1
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceSetLocationLatitude(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2)
assert managedDeviceClientValue._location["latitude"] == 2
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceSetLocationElevation(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2, elevation=3)
assert managedDeviceClientValue._location["elevation"] == 3
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceSetLocationAccuracy(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2, elevation=3, accuracy=4)
assert managedDeviceClientValue._location["accuracy"] == 4
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceSetErrorCodeNone(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setErrorCode(errorCode=None)
assert managedDeviceClientValue._errorCode == 0
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceSetErrorCode(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setErrorCode(errorCode=15)
assert True
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceClearErrorCodes(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.clearErrorCodes()
            assert managedDeviceClientValue._errorCode is None
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceAddLog(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.addLog(msg="h", data="e")
assert True
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceClearLog(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.clearLog()
assert True
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceRespondDeviceAction(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.respondDeviceAction(reqId=1)
assert True
        except Exception:
            pytest.fail("unexpected exception")
    # TODO: cover lines 337-571 of the module under test.
def testManagedDeviceSetState(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setState(status=1)
assert True
        except Exception:
            pytest.fail("unexpected exception")
def testManagedDeviceSetUpdateStatus(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setUpdateStatus(status=1)
        except Exception:
            pytest.fail("unexpected exception")
    # Use this template for the remaining name-mangled __ handler functions.
def testManagedDeviceMgmtResponseError(self):
with pytest.raises(Exception) as e:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDevice = ManagedDeviceClient(config)
testValue = "Test"
encodedPayload = Utf8Codec.encode(testValue)
managedDevice._ManagedDeviceClient__onDeviceMgmtResponse(client=1, userdata=2, pahoMessage=encodedPayload)
assert "Unable to parse JSON. payload=" " error" in str(e.value)
|
epl-1.0
|
88cc7945d570b3766356e9f5cfe113fe
| 40.439114
| 118
| 0.574087
| 4.424744
| false
| true
| false
| false
|
mbj4668/pyang
|
pyang/repository.py
|
1
|
5853
|
"""A repository for searching and holding loaded pyang modules"""
import os
import sys
import io
from . import util
from . import syntax
class Repository(object):
"""Abstract base class that represents a module repository"""
def get_modules_and_revisions(self, ctx):
"""Return a list of all modules and their revisons
Returns a tuple (`modulename`, `revision`, `handle`), where
`handle' is used in the call to get_module_from_handle() to
retrieve the module.
"""
def get_module_from_handle(self, handle):
"""Return the raw module text from the repository
Returns (`ref`, `in_format`, `text`) if found, or None if not found.
        `ref` is a string which is used to identify the source of
        the text for the user; it is used in error messages.
`in_format` is one of 'yang' or 'yin' or None.
`text` is the raw text data
Raises `ReadError`
"""
class ReadError(Exception):
"""Signals that an error occured during module retrieval"""
class FileRepository(Repository):
def __init__(self, path="", use_env=True, no_path_recurse=False,
verbose=False):
"""Create a Repository which searches the filesystem for modules
        `path` is an `os.pathsep`-separated string of directories
"""
Repository.__init__(self)
self.dirs = []
self.no_path_recurse = no_path_recurse
self.modules = None
self.verbose = verbose
for directory in path.split(os.pathsep):
self._add_directory(directory)
while use_env:
use_env = False
modpath = os.getenv('YANG_MODPATH')
if modpath is not None:
for directory in modpath.split(os.pathsep):
self._add_directory(directory)
home = os.getenv('HOME')
if home is not None:
self._add_directory(os.path.join(home, 'yang', 'modules'))
inst = os.getenv('YANG_INSTALL')
if inst is not None:
self._add_directory(os.path.join(inst, 'yang', 'modules'))
break # skip search if install location is indicated
default_install = os.path.join(
sys.prefix, 'share', 'yang', 'modules')
if os.path.exists(default_install):
self._add_directory(default_install)
break # end search if default location exists
            # On some systems sys.prefix returns `/usr`
            # while the real location is `/usr/local`;
            # if the package is installed with pip,
            # that location can easily be retrieved.
import pkgutil
if not pkgutil.find_loader('pip'):
break # abort search if pip is not installed
            # Hack below to handle pip 10 internals;
            # if someone who knows pip can fix this properly, that would be great!
location = None
try:
import pip.locations as locations
location = locations.distutils_scheme('pyang')
            except Exception:
try:
import pip._internal.locations as locations
location = locations.distutils_scheme('pyang')
                except Exception:
pass
if location is not None:
self._add_directory(
os.path.join(location['data'], 'share', 'yang', 'modules'))
if verbose:
sys.stderr.write('# module search path: %s\n'
% os.pathsep.join(self.dirs))
def _add_directory(self, directory):
if (not directory
or directory in self.dirs
or not os.path.isdir(directory)):
return False
self.dirs.append(directory)
return True
def _setup(self, ctx):
# check all dirs for yang and yin files
self.modules = []
def add_files_from_dir(d):
try:
files = os.listdir(d)
except OSError:
files = []
for fname in files:
absfilename = os.path.join(d, fname)
if os.path.isfile(absfilename):
m = syntax.re_filename.search(fname)
if m is not None:
name, rev, in_format = m.groups()
if not os.access(absfilename, os.R_OK):
continue
if absfilename.startswith("./"):
absfilename = absfilename[2:]
handle = in_format, absfilename
self.modules.append((name, rev, handle))
elif (not self.no_path_recurse
and d != '.' and os.path.isdir(absfilename)):
add_files_from_dir(absfilename)
for d in self.dirs:
add_files_from_dir(d)
def get_modules_and_revisions(self, ctx):
if self.modules is None:
self._setup(ctx)
return self.modules
def get_module_from_handle(self, handle):
in_format, absfilename = handle
fd = None
try:
fd = io.open(absfilename, "r", encoding="utf-8")
text = fd.read()
if self.verbose:
util.report_file_read(absfilename)
except IOError as ex:
raise self.ReadError("%s: %s" % (absfilename, ex))
except UnicodeDecodeError as ex:
s = str(ex).replace('utf-8', 'utf8')
raise self.ReadError("%s: unicode error: %s" % (absfilename, s))
finally:
if fd is not None:
fd.close()
if in_format is None:
in_format = util.guess_format(text)
return absfilename, in_format, text
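# --- Usage sketch (illustrative; not part of the original module) ---
# Assumes a "modules/" directory containing YANG files that follow pyang's
# <name>[@<revision>].yang naming convention; FileRepository ignores `ctx`.
def _example_list_modules():
    repo = FileRepository(path="modules", use_env=False, verbose=True)
    for name, rev, handle in repo.get_modules_and_revisions(ctx=None):
        ref, in_format, text = repo.get_module_from_handle(handle)
        print(name, rev, in_format, len(text))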
|
isc
|
17e8f7f53c9b7c93601f109e9dc07617
| 35.12963
| 79
| 0.537502
| 4.491942
| false
| false
| false
| false
|
mbj4668/pyang
|
test/test_issues/test_i225/test_prefix_deviation.py
|
1
|
3107
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name
"""
tests for PYANG data files
"""
import os
import sys
# hack to handle pip 10 internals
try:
import pip.locations as locations
except ImportError:
import pip._internal.locations as locations
from pyang.context import Context
from pyang.repository import FileRepository
EXISTING_MODULE = 'ietf-yang-types'
DEFAULT_OPTIONS = {
'format': 'yang',
'verbose': True,
'list_errors': True,
'print_error_code': True,
'yang_remove_unused_imports': True,
'yang_canonical': True,
'trim_yin': False,
'keep_comments': True,
'features': [],
'deviations': [],
'path': []
}
"""Default options for pyang command line"""
class objectify(object):
"""Utility for providing object access syntax (.attr) to dicts"""
def __init__(self, *args, **kwargs):
for entry in args:
self.__dict__.update(entry)
self.__dict__.update(kwargs)
def __getattr__(self, _):
return None
def __setattr__(self, attr, value):
self.__dict__[attr] = value
def create_context(path='.', *options, **kwargs):
"""Generates a pyang context
Arguments:
path (str): location of YANG modules.
*options: list of dicts, with options to be passed to context.
**kwargs: similar to ``options`` but have a higher precedence.
Returns:
pyang.Context: Context object for ``pyang`` usage
"""
opts = objectify(DEFAULT_OPTIONS, *options, **kwargs)
repo = FileRepository(path, no_path_recurse=opts.no_path_recurse)
ctx = Context(repo)
ctx.opts = opts
return ctx
def test_can_find_modules_with_pip_install():
"""
context should find the default installed modules even when pyang
is installed using pip
"""
    # make sure env vars do not mask the default search behavior
if os.environ.get('YANG_INSTALL'):
del os.environ['YANG_INSTALL']
if os.environ.get('YANG_MODPATH'):
del os.environ['YANG_MODPATH']
ctx = create_context()
module = ctx.search_module(None, EXISTING_MODULE)
assert module is not None
def test_can_find_modules_when_prefix_differ(monkeypatch):
"""
    context should find the default installed modules, without the help
    of environment variables, even if the pip install location
    differs from ``sys.prefix``
"""
    # Store the pip location up front:
    # monkeypatching sys.prefix below would otherwise change the computed scheme.
try:
scheme = locations.distutils_scheme('pyang')
monkeypatch.setattr(
locations, 'distutils_scheme', lambda *_: scheme)
    except Exception:
print("cannot get scheme from pip, skipping")
return
# simulate #225 description
monkeypatch.setattr(sys, 'prefix', '/usr')
    # make sure env vars do not mask the default search behavior
if os.environ.get('YANG_INSTALL'):
del os.environ['YANG_INSTALL']
if os.environ.get('YANG_MODPATH'):
del os.environ['YANG_MODPATH']
ctx = create_context()
module = ctx.search_module(None, EXISTING_MODULE)
assert module is not None
|
isc
|
48b0babf28f81699b5b6a833358dc176
| 24.891667
| 71
| 0.641455
| 3.86924
| false
| false
| false
| false
|
mbj4668/pyang
|
pyang/plugins/omni.py
|
1
|
11901
|
import optparse
from pyang import plugin
paths_in_module = []
leafrefs = []
key = ''
class_keywords = ["container", "list", "case", "choice", "augment"]
servicepoints = ["servicepoint", "productpoint"]
classnamecolor = " {0.113725, 0.352941, 0.670588}"
mandatoryconfig = " {0.600000, 0.152941, 0.152941}"
optionalconfig = " {0.129412, 0.501961, 0.254902}"
notconfig = " {0.549020, 0.486275, 0.133333}"
# Which line style to use for containment; OmniGraffle defaults to Bezier
# curves, so override it with a straight line.
containsline = " tail type: \"FilledDiamond\", head type: \"None\", line type: \"Straight\" "
leafrefline = " line type: \"Straight\", head type: \"FilledArrow\" "
def pyang_plugin_init():
plugin.register_plugin(OmniPlugin())
class OmniPlugin(plugin.PyangPlugin):
def add_output_format(self, fmts):
self.multiple_modules = True
fmts['omni'] = self
def add_opts(self, optparser):
optlist = [
optparse.make_option("--omni-path",
dest="omni_tree_path",
help="Subtree to print"),
]
g = optparser.add_option_group("OmniGraffle output specific options")
g.add_options(optlist)
def setup_fmt(self, ctx):
ctx.implicit_errors = False
def emit(self, ctx, modules, fd):
if ctx.opts.omni_tree_path is not None:
path = ctx.opts.omni_tree_path.split('/')
if path[0] == '':
path = path[1:]
else:
path = None
print_omni_header(modules, fd, path, ctx)
emit_modules(modules, fd, path, ctx)
post_process(fd, ctx)
print_omni_footer(modules, fd, path, ctx)
def print_omni_header(modules, fd, path, ctx):
# Build doc name from module names
name = ''
for m in modules:
name += m.arg
name = name[:32]
fd.write("""
tell application id "com.omnigroup.OmniGraffle6"
activate
make new document with properties {name:\"%s\"}
set bounds of window 1 to {50, 50, 1200, 800}
tell first canvas of document \"%s\"
set canvasSize to {600, 600}
set name to \"YANG Model\"
set adjusts pages to true
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {32.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "leafref"}, origin: {2403.202333, 169.219094}}
make new line at end of graphics with properties {point list: {{2513.245592418806, 185.5962102698529}, {2373.745592418806, 185.3149602698529}}, draws shadow: false, head type: "FilledArrow"}
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {105.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "Schema tree, containment"}, origin: {2397.741930, 138.863190}}
make new line at end of graphics with properties {point list: {{2374.993645107464, 154.4881903780727}, {2514.493645107464, 154.4881903780727}}, draws shadow: false, tail type: "FilledDiamond"}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 14.000000}, text: {alignment: center, font: "Helvetica-Bold", text: "Legend"}, text placement: top, origin: {2366.929155, 43.937008}, vertical padding: 0}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 56.000000}, text: {{color: {0.600000, 0.152941, 0.152941}, text: "Mandatory config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Optional config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Key leaf", underlined: true}, {color: {0.129412, 0.501961, 0.254902}, text: "
"}, {color: {0.549020, 0.486275, 0.133333}, text: "Not config"}}, text placement: top, origin: {2366.929155, 57.937008}, vertical padding: 0}
assemble graphics -2 through -1 table shape { 2, 1 }
assemble graphics -5 through -1
""" %(name, name))
def post_process(fd, ctx):
for s in leafrefs:
        # don't try to connect to a class that wasn't given as input to pyang
if s.strip().split(" to ")[1].split(" with ")[0] in paths_in_module:
fd.write(s)
def print_omni_footer(modules, fd, path, ctx):
fd.write("""
layout
end tell
end tell
""")
def print_module_info(module, fd, ctx):
title = module.arg
print_text(title, fd, ctx)
def emit_modules(modules, fd, path, ctx):
for module in modules:
print_module_info(module, fd, ctx)
chs = [ch for ch in module.i_children]
if path is not None and len(path) > 0:
chs = [ch for ch in chs
if ch.arg == path[0]]
path = path[1:]
for ch in chs:
print_node(module, ch, module, fd, path, ctx, 'true')
for augment in module.search('augment'):
print_node(module, augment, module, fd, path, ctx, 'true')
def iterate_children(parent, s, module, fd, path, ctx):
if hasattr(s, 'i_children'):
for ch in s.i_children:
print_node(s, ch, module, fd, path, ctx)
def print_class_header(s, fd, ctx, root='false'):
global servicepoints
service = ""
for sub in s.substmts:
if sub.keyword[1] in servicepoints:
service = "SERVICE\n"
fd.write("make new shape at end of graphics with properties {autosizing: full, size: {187.500000, 14.000000}, text: {{alignment: center, font: \"Helvetica-Bold\", text: \"%s \"}, {alignment: center, color:%s, font: \"Helvetica-Bold\", text: \"%s \"}}, text placement: top, origin: {150.000000, 11.500000}, vertical padding: 0}\n" %(service + s.keyword, classnamecolor, s.arg))
def print_class_stuff(s, fd, ctx):
number = print_attributes(s, fd, ctx)
#print_actions(s,fd, ctx)
close_class(number, s, fd, ctx)
print_associations(s,fd, ctx)
def print_attributes(s,fd, ctx):
global key
if s.keyword == 'list':
keystring = s.search_one('key')
if keystring is not None:
key = keystring.arg.split(" ")
else:
key = ''
if hasattr(s, 'i_children'):
found_attrs = False
found_actions = False
index = False
# Search attrs
for ch in s.i_children:
index = False
if ch.keyword in ["leaf", "leaf-list"]:
if not found_attrs:
# first attr in attr section
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{")
found_attrs = True
else:
# comma before new textitem
fd.write(", ")
if ch.keyword == "leaf-list":
append = "[]"
else:
append = ""
if ch.arg in key:
index = True
print_leaf(ch, append, index, fd, ctx)
if found_attrs:
# close attr section
fd.write("}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# Search actions
for ch in s.i_children:
if ch.keyword == ('tailf-common', 'action'):
if not found_actions:
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{text:\"")
found_actions = True
print_action(ch, fd, ctx)
if found_actions:
fd.write("\"}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# return number of sections in class
return (found_attrs + found_actions) + 1
def close_class(number, s, fd, ctx):
fd.write("local %s\n" % fullpath(s))
fd.write("set %s to assemble ( graphics -%s through -1 ) table shape {%s, 1}\n"
% (fullpath(s), number, number))
def print_node(parent, s, module, fd, path, ctx, root='false'):
# We have a class
if s.keyword in class_keywords:
print_class_header(s, fd, ctx, root)
paths_in_module.append(fullpath(s))
print_class_stuff(s, fd, ctx)
# Do not try to create relationship to module
if parent != module:
presence = s.search_one("presence")
if presence is not None:
print_aggregation(parent, s, fd, "0", "1", ctx)
else:
print_aggregation(parent, s, fd, "1", "1", ctx)
iterate_children(parent, s, module, fd, path, ctx)
def print_associations(s, fd, ctx):
# find leafrefs and identityrefs
if hasattr(s, 'i_children'):
for ch in s.i_children:
if hasattr(ch, 'i_leafref_ptr') and (ch.i_leafref_ptr is not None):
to = ch.i_leafref_ptr[0]
print_association(s, to.parent, ch, to, "leafref", fd, ctx)
def print_aggregation(parent, this, fd, lower, upper, ctx):
fd.write("connect %s to %s with properties {%s} \n" %(fullpath(parent),fullpath(this), containsline))
def print_rpc(rpc, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(rpc), rpc.arg))
def print_action(action, fd, ctx, root='false'):
fd.write("%s()\n" %action.arg)
def print_notification(notification, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(notification), notification.arg))
def print_inout(parent, s, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s-%s\' " %(fullpath(s), parent.arg, s.keyword))
def print_leaf(leaf, append, index, fd, ctx):
if leaf.i_config:
c = '(rw)'
color = optionalconfig
else:
c = '(ro)'
color = notconfig
m = leaf.search_one('mandatory')
if m is None or m.arg == 'false':
mand = '?'
else:
mand = ''
color = mandatoryconfig
if not index:
fd.write("{font: \"Helvetica-Oblique\", color: %s, text: \"%s%s%s %s %s\n\"}"
% (color, leaf.arg, append, mand, c, get_typename(leaf)))
else:
fd.write("{font: \"Helvetica-Oblique\", color: %s, underlined: true, text: \"%s%s%s %s %s\n\"}"
% (color, leaf.arg, append, mand, c, get_typename(leaf)))
def print_association(fromclass, toclass, fromleaf, toleaf, association, fd, ctx):
leafrefs.append("connect " + (fullpath(fromclass)) + " to " + fullpath(toclass) + " with properties {" + leafrefline + "}\n", )
def print_text(t, fd, ctx):
fd.write("make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {57.000000, 30.000000}, text: {size: 16, alignment: center, font: \"HelveticaNeue\", text: \"%s\"}, origin: {100, 4.500000}}\n" %t)
def get_typename(s):
t = s.search_one('type')
if t is not None:
s = t.arg
# if t.arg == 'enumeration':
# s = s + ' : {'
# for enums in t.substmts[:10]:
# s = s + enums.arg + ','
# if len(t.substmts) > 3:
# s = s + "..."
# s = s + '}'
# elif t.arg == 'leafref':
# s = s + ' : '
# p = t.search_one('path')
# if p is not None:
# s = s + p.arg
return s
def fullpath(stmt):
pathsep = "_"
path = stmt.arg
# for augment paths we need to remove initial /
if path.startswith("/"):
path = path[1:]
else:
if stmt.keyword == 'case':
path = path + '-case'
elif stmt.keyword == 'grouping':
path = path + '-grouping'
while stmt.parent is not None:
stmt = stmt.parent
if stmt.arg is not None:
path = stmt.arg + pathsep + path
path = path.replace('-', '_')
path = path.replace(':', '_')
path = path.replace('/', '_')
return path
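# --- Usage sketch (illustrative; not part of the original module) ---
# The plugin registers the "omni" output format, so a typical invocation
# (module and output file names are placeholders) would be:
#
#   pyang -f omni --omni-path /interfaces ietf-interfaces.yang > model.applescript
#   osascript model.applescript   # drives OmniGraffle 6 via AppleScript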
|
isc
|
be68baeefe1ad69a4479aae3892de1f6
| 36.780952
| 380
| 0.580035
| 3.356176
| false
| false
| false
| false
|