code | docstring
|---|---|
def x10_command(self, house_code, unit_number, state):
house_code = normalize_housecode(house_code)
if unit_number is not None:
unit_number = normalize_unitnumber(unit_number)
return self._x10_command(house_code, unit_number, state)
|
Send an X10 command to a unit, or to an entire house code when unit_number is None.
@param house_code (A-P) - example='A'
@param unit_number (1-16) - example=1 (or None to impact entire house code)
@param state - Mochad command/state, See
https://sourceforge.net/p/mochad/code/ci/master/tree/README
examples=OFF, 'OFF', 'ON', ALL_OFF, 'all_units_off', 'xdim 128', etc.
Examples:
x10_command('A', '1', ON)
x10_command('A', '1', OFF)
x10_command('A', '1', 'ON')
x10_command('A', '1', 'OFF')
x10_command('A', None, ON)
x10_command('A', None, OFF)
x10_command('A', None, 'all_lights_off')
x10_command('A', None, 'all_units_off')
x10_command('A', None, ALL_OFF)
x10_command('A', None, 'all_lights_on')
x10_command('A', 1, 'xdim 128')
|
def buildOverlappedSequences( numSequences = 2,
seqLen = 5,
sharedElements = [3,4],
numOnBitsPerPattern = 3,
patternOverlap = 0,
seqOverlap = 0,
**kwargs
):
numSharedElements = len(sharedElements)
numUniqueElements = seqLen - numSharedElements
numPatterns = numSharedElements + numUniqueElements * numSequences
patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)
numCols = len(patterns[0])
trainingSequences = []
uniquePatternIndices = range(numSharedElements, numPatterns)
for _ in xrange(numSequences):
sequence = []
sharedPatternIndices = range(numSharedElements)
for j in xrange(seqLen):
if j in sharedElements:
patIdx = sharedPatternIndices.pop(0)
else:
patIdx = uniquePatternIndices.pop(0)
sequence.append(patterns[patIdx])
trainingSequences.append(sequence)
if VERBOSITY >= 3:
print "\nTraining sequences"
printAllTrainingSequences(trainingSequences)
return (numCols, trainingSequences)
|
Create training sequences that share some elements in the middle.
Parameters:
-----------------------------------------------------
numSequences: Number of unique training sequences to generate
seqLen: Overall length of each sequence
sharedElements: Which element indices of each sequence are shared. These
will be in the range between 0 and seqLen-1
numOnBitsPerPattern: Number of ON bits in each TM input pattern
patternOverlap: Max number of bits of overlap between any 2 patterns
retval: (numCols, trainingSequences)
numCols - width of the patterns
trainingSequences - a list of training sequences
|
def delayed_call(self, delay, function):
main_loop = self
handler = []
class DelayedCallHandler(TimeoutHandler):
@timeout_handler(delay, False)
def callback(self):
try:
function()
finally:
main_loop.remove_handler(handler[0])
handler.append(DelayedCallHandler())
self.add_handler(handler[0])
|
Schedule function to be called from the main loop after `delay`
seconds.
:Parameters:
- `delay`: seconds to wait
:Types:
- `delay`: `float`
|
def flat_map(self, flatmap_function):
from heronpy.streamlet.impl.flatmapbolt import FlatMapStreamlet
fm_streamlet = FlatMapStreamlet(flatmap_function, self)
self._add_child(fm_streamlet)
return fm_streamlet
|
Return a new Streamlet by applying flatmap_function to each element of this Streamlet
and flattening the result
|
def get_raw_output(self, tile, _baselevel_readonly=False):
if not isinstance(tile, (BufferedTile, tuple)):
raise TypeError("'tile' must be a tuple or BufferedTile")
if isinstance(tile, tuple):
tile = self.config.output_pyramid.tile(*tile)
if _baselevel_readonly:
tile = self.config.baselevels["tile_pyramid"].tile(*tile.id)
if tile.zoom not in self.config.zoom_levels:
return self.config.output.empty(tile)
if tile.crs != self.config.process_pyramid.crs:
raise NotImplementedError(
"reprojection between processes not yet implemented"
)
if self.config.mode == "memory":
process_tile = self.config.process_pyramid.intersecting(tile)[0]
return self._extract(
in_tile=process_tile,
in_data=self._execute_using_cache(process_tile),
out_tile=tile
)
process_tile = self.config.process_pyramid.intersecting(tile)[0]
if tile.pixelbuffer > self.config.output.pixelbuffer:
output_tiles = list(self.config.output_pyramid.tiles_from_bounds(
tile.bounds, tile.zoom
))
else:
output_tiles = self.config.output_pyramid.intersecting(tile)
if self.config.mode == "readonly" or _baselevel_readonly:
if self.config.output.tiles_exist(process_tile):
return self._read_existing_output(tile, output_tiles)
else:
return self.config.output.empty(tile)
elif self.config.mode == "continue" and not _baselevel_readonly:
if self.config.output.tiles_exist(process_tile):
return self._read_existing_output(tile, output_tiles)
else:
return self._process_and_overwrite_output(tile, process_tile)
elif self.config.mode == "overwrite" and not _baselevel_readonly:
return self._process_and_overwrite_output(tile, process_tile)
|
Get output raw data.
This function won't work with multiprocessing, as it uses the
``threading.Lock()`` class.
Parameters
----------
tile : tuple, Tile or BufferedTile
If a tile index is given, a tile from the output pyramid will be
assumed. Tile cannot be bigger than process tile!
Returns
-------
data : NumPy array or features
process output
|
def getOutputElementCount(self, name):
if name == "resetOut":
print ("WARNING: getOutputElementCount should not have been called with "
"resetOut")
return 1
elif name == "sequenceIdOut":
print ("WARNING: getOutputElementCount should not have been called with "
"sequenceIdOut")
return 1
elif name == "dataOut":
if self.encoder is None:
raise Exception("NuPIC requested output element count for 'dataOut' "
"on a RecordSensor node, but the encoder has not "
"been set")
return self.encoder.getWidth()
elif name == "sourceOut":
if self.encoder is None:
raise Exception("NuPIC requested output element count for 'sourceOut' "
"on a RecordSensor node, "
"but the encoder has not been set")
return len(self.encoder.getDescription())
elif name == "bucketIdxOut":
return 1
elif name == "actValueOut":
return 1
elif name == "categoryOut":
return self.numCategories
elif name == 'spatialTopDownOut' or name == 'temporalTopDownOut':
if self.encoder is None:
raise Exception("NuPIC requested output element count for 'sourceOut' "
"on a RecordSensor node, "
"but the encoder has not been set")
return len(self.encoder.getDescription())
else:
raise Exception("Unknown output %s" % name)
|
Computes the width of dataOut.
Overrides
:meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.
|
def _setup_custom_grouping(self, topology):
for i in range(len(topology.bolts)):
for in_stream in topology.bolts[i].inputs:
if in_stream.stream.component_name == self.my_component_name and \
in_stream.gtype == topology_pb2.Grouping.Value("CUSTOM"):
if in_stream.type == topology_pb2.CustomGroupingObjectType.Value("PYTHON_OBJECT"):
custom_grouping_obj = default_serializer.deserialize(in_stream.custom_grouping_object)
if isinstance(custom_grouping_obj, str):
pex_loader.load_pex(self.topology_pex_abs_path)
grouping_cls = \
pex_loader.import_and_get_class(self.topology_pex_abs_path, custom_grouping_obj)
custom_grouping_obj = grouping_cls()
assert isinstance(custom_grouping_obj, ICustomGrouping)
self.custom_grouper.add(in_stream.stream.id,
self._get_taskids_for_component(topology.bolts[i].comp.name),
custom_grouping_obj,
self.my_component_name)
elif in_stream.type == topology_pb2.CustomGroupingObjectType.Value("JAVA_OBJECT"):
raise NotImplementedError("Java-serialized custom grouping is not yet supported "
"for python topology")
else:
raise ValueError("Unrecognized custom grouping type found: %s" % str(in_stream.type))
|
Checks whether there are any bolts that consume any of my streams using custom grouping
|
def _store_helper(model: Action, session: Optional[Session] = None) -> None:
if session is None:
session = _make_session()
session.add(model)
session.commit()
session.close()
|
Help store an action.
|
def remove_handler(self, handler):
with self.lock:
if handler in self.handlers:
self.handlers.remove(handler)
self._update_handlers()
|
Remove a handler object.
:Parameters:
- `handler`: the object to remove
|
def in_op(self, other):
if not is_object(other):
raise MakeError(
'TypeError',
"You can\'t use 'in' operator to search in non-objects")
return other.has_property(to_string(self))
|
checks if self is in other
|
def register(self, what, obj):
name = obj.name
version = obj.version
enable = obj.enable
if enable == 'n':
return
key = Key(name, version)
self.plugins[what][key] = obj
|
Registering a plugin
Params
------
what: Nature of the plugin (backend, instrumentation, repo)
obj: Instance of the plugin
|
def _build_google_client(service, api_version, http_auth):
client = build(service, api_version, http=http_auth)
return client
|
Google build client helper.
:param service: service to build client for
:type service: ``str``
:param api_version: API version to use.
:type api_version: ``str``
:param http_auth: Initialized HTTP client to use.
:type http_auth: ``object``
:return: google-python-api client initialized to use 'service'
:rtype: ``object``
|
def limit(self, max=30):
if abs(self.vx) > max:
self.vx = self.vx/abs(self.vx)*max
if abs(self.vy) > max:
self.vy = self.vy/abs(self.vy)*max
if abs(self.vz) > max:
self.vz = self.vz/abs(self.vz)*max
|
The speed limit for a boid.
Boids can momentarily go very fast,
something that is impossible for real animals.
|
def save_file(self, obj):
try:
import StringIO as pystringIO
except ImportError:
import io as pystringIO
if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
if obj is sys.stdout:
return self.save_reduce(getattr, (sys, 'stdout'), obj=obj)
if obj is sys.stderr:
return self.save_reduce(getattr, (sys, 'stderr'), obj=obj)
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if hasattr(obj, 'isatty') and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if 'r' not in obj.mode:
raise pickle.PicklingError("Cannot pickle files that are not opened for reading")
name = obj.name
try:
fsize = os.stat(name).st_size
except OSError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be stat" % name)
if obj.closed:
retval = pystringIO.StringIO("")
retval.close()
elif not fsize:
retval = pystringIO.StringIO("")
try:
tmpfile = file(name)
tst = tmpfile.read(1)
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
tmpfile.close()
if tst != '':
raise pickle.PicklingError(
"Cannot pickle file %s as it does not appear to map to a physical, real file" % name)
else:
try:
tmpfile = file(name)
contents = tmpfile.read()
tmpfile.close()
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
retval = pystringIO.StringIO(contents)
curloc = obj.tell()
retval.seek(curloc)
retval.name = name
self.save(retval)
self.memoize(obj)
|
Save a file
|
def load_all_methods(self):
methods = []
Tmins, Tmaxs = [], []
if self.CASRN in ['7732-18-5', '67-56-1', '64-17-5']:
methods.append(TEST_METHOD_1)
self.TEST_METHOD_1_Tmin = 200.
self.TEST_METHOD_1_Tmax = 350
self.TEST_METHOD_1_coeffs = [1, .002]
Tmins.append(self.TEST_METHOD_1_Tmin); Tmaxs.append(self.TEST_METHOD_1_Tmax)
if self.CASRN in ['67-56-1']:
methods.append(TEST_METHOD_2)
self.TEST_METHOD_2_Tmin = 300.
self.TEST_METHOD_2_Tmax = 400
self.TEST_METHOD_2_coeffs = [1, .003]
Tmins.append(self.TEST_METHOD_2_Tmin); Tmaxs.append(self.TEST_METHOD_2_Tmax)
self.all_methods = set(methods)
if Tmins and Tmaxs:
self.Tmin = min(Tmins)
self.Tmax = max(Tmaxs)
|
Method to load all data, and set all_methods based on the available
data and properties. Demo function for testing only; must be
implemented according to the methods available for each individual
method.
|
def __intermediate_dns_servers(self, uci, address):
if 'dns' in uci:
return uci['dns']
if address['proto'] in ['dhcp', 'dhcpv6', 'none']:
return None
dns = self.netjson.get('dns_servers', None)
if dns:
return ' '.join(dns)
|
determines UCI interface "dns" option
|
def serialize_text(out, text):
padding = len(out)
add_padding = padding_adder(padding)
text = add_padding(text, ignore_first_line=True)
return out + text
|
This method is used to append content of the `text`
argument to the `out` argument.
Depending on how many lines in the text, a
padding can be added to all lines except the first
one.
Concatenation result is appended to the `out` argument.
|
def mpsse_read_gpio(self):
self._write('\x81\x83')
data = self._poll_read(2)
low_byte = ord(data[0])
high_byte = ord(data[1])
logger.debug('Read MPSSE GPIO low byte = {0:02X} and high byte = {1:02X}'.format(low_byte, high_byte))
return (high_byte << 8) | low_byte
|
Read both GPIO bus states and return a 16 bit value with their state.
D0-D7 are the lower 8 bits and C0-C7 are the upper 8 bits.
|
def ip2long(ip):
try:
return int(binascii.hexlify(socket.inet_aton(ip)), 16)
except socket.error:
return int(binascii.hexlify(socket.inet_pton(socket.AF_INET6, ip)), 16)
|
Wrapper function for IPv4 and IPv6 converters.
:arg ip: IPv4 or IPv6 address
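For illustration, the conversion yields the usual integer forms (values traced from the code above):
>>> ip2long('127.0.0.1')
2130706433
>>> ip2long('::1')
1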
|
def delete_key(key_name, stash, passphrase, backend):
stash = _get_stash(backend, stash, passphrase)
for key in key_name:
try:
click.echo('Deleting key {0}...'.format(key))
stash.delete(key_name=key)
except GhostError as ex:
sys.exit(ex)
click.echo('Keys deleted successfully')
|
Delete a key from the stash
`KEY_NAME` is the name of the key to delete
You can provide that multiple times to delete multiple keys at once
|
def hsv2rgb_spectrum(hsv):
h, s, v = hsv
return hsv2rgb_raw(((h * 192) >> 8, s, v))
|
Generates RGB values from HSV values in line with a typical light
spectrum.
|
def create_tree(endpoints):
tree = {}
for method, url, doc in endpoints:
path = [p for p in url.strip('/').split('/')]
here = tree
version = path[0]
here.setdefault(version, {})
here = here[version]
for p in path[1:]:
part = _camelcase_to_underscore(p)
here.setdefault(part, {})
here = here[part]
if not 'METHODS' in here:
here['METHODS'] = [[method, doc]]
else:
if not method in here['METHODS']:
here['METHODS'].append([method, doc])
return tree
|
Creates the Trello endpoint tree.
>>> r = {'1': { \
'actions': {'METHODS': {'GET'}}, \
'boards': { \
'members': {'METHODS': {'DELETE'}}}} \
}
>>> r == create_tree([ \
'GET /1/actions/[idAction]', \
'DELETE /1/boards/[board_id]/members/[idMember]'])
True
|
def schedule_play(self, call_params):
path = '/' + self.api_version + '/SchedulePlay/'
method = 'POST'
return self.request(path, method, call_params)
|
REST Schedule playing something on a call Helper
|
def warn_if_not_float(X, estimator='This algorithm'):
if not isinstance(estimator, str):
estimator = estimator.__class__.__name__
if X.dtype.kind != 'f':
warnings.warn("%s assumes floating point values as input, "
"got %s" % (estimator, X.dtype))
return True
return False
|
Warning utility function to check that data type is floating point.
Returns True if a warning was raised (i.e. the input is not float) and
False otherwise, for easier input validation.
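A small sketch of the expected behaviour, assuming NumPy arrays as input (the estimator name is illustrative):
>>> import numpy as np
>>> warn_if_not_float(np.array([1, 2, 3]), estimator='MyScaler')  # integer dtype -> warning raised
True
>>> warn_if_not_float(np.array([1.0, 2.0, 3.0]))
False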
|
def get_zones(input_list):
if not input_list:
return []
output_list = []
for zone in input_list:
if zone.endswith('*'):
prefix = zone[:-1]
output_list.extend([z for z in _ZONES if z.startswith(prefix)])
else:
output_list.append(zone)
return output_list
|
Returns a list of zones based on any wildcard input.
This function is intended to provide an easy method for producing a list
of desired zones for a pipeline to run in.
The Pipelines API default zone list is "any zone". The problem with
"any zone" is that it can lead to incurring Cloud Storage egress charges
if the GCE zone selected is in a different region than the GCS bucket.
See https://cloud.google.com/storage/pricing#network-egress.
A user with a multi-region US bucket would want pipelines to run in
a "us-*" zone.
A user with a regional bucket in US would want to restrict pipelines to
run in a zone in that region.
Rarely does the specific zone matter for a pipeline.
This function allows for a simple short-hand such as:
[ "us-*" ]
[ "us-central1-*" ]
These examples will expand out to the full list of US and us-central1 zones
respectively.
Args:
input_list: list of zone names/patterns
Returns:
A list of zones, with any wildcard zone specifications expanded.
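A hedged usage sketch; the expansion depends on the module-level _ZONES list, so the zone names below are illustrative only:
>>> get_zones(['us-central1-*'])     # expands to every known us-central1 zone
['us-central1-a', 'us-central1-b', 'us-central1-c', 'us-central1-f']
>>> get_zones(['europe-west1-d'])    # names without a trailing '*' pass through unchanged
['europe-west1-d']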
|
def total_memory():
with file('/proc/meminfo', 'r') as f:
for line in f:
words = line.split()
if words[0].upper() == 'MEMTOTAL:':
return int(words[1]) * 1024
raise IOError('MemTotal unknown')
|
Returns the amount of memory available for use.
The memory is obtained from MemTotal entry in /proc/meminfo.
Notes
=====
This function is not very useful and not very portable.
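To illustrate the parsing of a single /proc/meminfo line (the figure is made up):
>>> line = 'MemTotal:       16384256 kB'
>>> words = line.split()
>>> words[0].upper() == 'MEMTOTAL:'
True
>>> int(words[1]) * 1024   # value is reported in kB, returned in bytes
16777478144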
|
def play(self):
if not self.is_playing():
self.play_pause()
self._is_playing = True
self.playEvent(self)
|
Play the video asynchronously returning control immediately to the calling code
|
def from_json(cls, fh):
if isinstance(fh, str):
return cls(json.loads(fh))
else:
return cls(json.load(fh))
|
Load json from file handle.
Args:
fh (file): File handle to load from.
Example:
>>> with open('data.json', 'r') as json:
>>> data = composite.load(json)
|
def html(tag):
return (HTML_START.format(tag=tag), HTML_END.format(tag=tag))
|
Return sequence of start and end regex patterns for simple HTML tag
|
def get_queue_data(self, queues=None, edge=None, edge_type=None, return_header=False):
queues = _get_queues(self.g, queues, edge, edge_type)
data = np.zeros((0, 6))
for q in queues:
dat = self.edge2queue[q].fetch_data()
if len(dat) > 0:
data = np.vstack((data, dat))
if return_header:
return data, 'arrival,service,departure,num_queued,num_total,q_id'
return data
|
Gets data from all the queues.
If none of the parameters are given then data from every
:class:`.QueueServer` is retrieved.
Parameters
----------
queues : int or an *array_like* of int, (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` whose data will
be retrieved.
edge : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues to retrieve data from. Must
be either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
A integer, or a collection of integers identifying which
edge types to retrieve data from.
return_header : bool (optional, default: False)
Determines whether the column headers are returned.
Returns
-------
out : :class:`~numpy.ndarray`
* 1st: The arrival time of an agent.
* 2nd: The service start time of an agent.
* 3rd: The departure time of an agent.
* 4th: The length of the queue upon the agent's arrival.
* 5th: The total number of :class:`Agents<.Agent>` in the
:class:`.QueueServer`.
* 6th: The :class:`QueueServer's<.QueueServer>` edge index.
out : str (optional)
A comma separated string of the column headers. Returns
``'arrival,service,departure,num_queued,num_total,q_id'``
Examples
--------
Data is not collected by default. Before simulating, be sure to
turn it on (as well as initialize the network). The following
returns data from queues with ``edge_type`` 1 or 3:
>>> import queueing_tool as qt
>>> g = qt.generate_pagerank_graph(100, seed=13)
>>> net = qt.QueueNetwork(g, seed=13)
>>> net.start_collecting_data()
>>> net.initialize(10)
>>> net.simulate(2000)
>>> data = net.get_queue_data(edge_type=(1, 3))
To get data from an edge connecting two vertices do the
following:
>>> data = net.get_queue_data(edge=(1, 50))
To get data from several edges do the following:
>>> data = net.get_queue_data(edge=[(1, 50), (10, 91), (99, 99)])
You can specify the edge indices as well:
>>> data = net.get_queue_data(queues=(20, 14, 0, 4))
|
def IIR_sos_header(fname_out, SOS_mat):
Ns, Mcol = SOS_mat.shape
f = open(fname_out, 'wt')
f.write('//define a IIR SOS CMSIS-DSP coefficient array\n\n')
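# NOTE: the next four f.write() calls lost their string arguments (everything
# from the '#' character onward) in this copy; they presumably emit the C
# preprocessor lines (e.g. #include / #define guards) of the generated header.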
f.write('
f.write('
f.write('
f.write('
f.write('/*********************************************************/\n');
f.write('/* IIR SOS Filter Coefficients */\n');
f.write('float32_t ba_coeff[%d] = { //b0,b1,b2,a1,a2,... by stage\n' % (5 * Ns))
for k in range(Ns):
if (k < Ns - 1):
f.write(' %+-13e, %+-13e, %+-13e,\n' % \
(SOS_mat[k, 0], SOS_mat[k, 1], SOS_mat[k, 2]))
f.write(' %+-13e, %+-13e,\n' % \
(-SOS_mat[k, 4], -SOS_mat[k, 5]))
else:
f.write(' %+-13e, %+-13e, %+-13e,\n' % \
(SOS_mat[k, 0], SOS_mat[k, 1], SOS_mat[k, 2]))
f.write(' %+-13e, %+-13e\n' % \
(-SOS_mat[k, 4], -SOS_mat[k, 5]))
f.write('};\n')
f.write('/*********************************************************/\n')
f.close()
|
Write IIR SOS Header Files
File format is compatible with CMSIS-DSP IIR
Directform II Filter Functions
Mark Wickert March 2015-October 2016
|
def requirements_for_changes(self, changes):
requirements = []
reqs_set = set()
if isinstance(changes, str):
changes = changes.split('\n')
if not changes or changes[0].startswith('-'):
return requirements
for line in changes:
line = line.strip(' -+*')
if not line:
continue
match = IS_REQUIREMENTS_RE2.search(line)
if match:
for match in REQUIREMENTS_RE.findall(match.group(1)):
if match[1]:
version = '==' + match[2] if match[1].startswith(' to ') else match[1]
req_str = match[0] + version
else:
req_str = match[0]
if req_str not in reqs_set:
reqs_set.add(req_str)
try:
requirements.append(pkg_resources.Requirement.parse(req_str))
except Exception as e:
log.warn('Could not parse requirement "%s" from changes: %s', req_str, e)
return requirements
|
Parse changes for requirements
:param list changes:
|
def credentials_from_code(client_id, client_secret, scope, code,
redirect_uri='postmessage', http=None,
user_agent=None,
token_uri=oauth2client.GOOGLE_TOKEN_URI,
auth_uri=oauth2client.GOOGLE_AUTH_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
device_uri=oauth2client.GOOGLE_DEVICE_URI,
token_info_uri=oauth2client.GOOGLE_TOKEN_INFO_URI,
pkce=False,
code_verifier=None):
flow = OAuth2WebServerFlow(client_id, client_secret, scope,
redirect_uri=redirect_uri,
user_agent=user_agent,
auth_uri=auth_uri,
token_uri=token_uri,
revoke_uri=revoke_uri,
device_uri=device_uri,
token_info_uri=token_info_uri,
pkce=pkce,
code_verifier=code_verifier)
credentials = flow.step2_exchange(code, http=http)
return credentials
|
Exchanges an authorization code for an OAuth2Credentials object.
Args:
client_id: string, client identifier.
client_secret: string, client secret.
scope: string or iterable of strings, scope(s) to request.
code: string, An authorization code, most likely passed down from
the client
redirect_uri: string, this is generally set to 'postmessage' to match
the redirect_uri that the client specified
http: httplib2.Http, optional http instance to use to do the fetch
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider
can be used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider
can be used.
device_uri: string, URI for device authorization endpoint. For
convenience defaults to Google's endpoints but any OAuth
2.0 provider can be used.
pkce: boolean, default: False, Generate and include a "Proof Key
for Code Exchange" (PKCE) with your authorization and token
requests. This adds security for installed applications that
cannot protect a client_secret. See RFC 7636 for details.
code_verifier: bytestring or None, default: None, parameter passed
as part of the code exchange when pkce=True. If
None, a code_verifier will automatically be
generated as part of step1_get_authorize_url(). See
RFC 7636 for details.
Returns:
An OAuth2Credentials object.
Raises:
FlowExchangeError if the authorization code cannot be exchanged for an
access token
|
def parse_file_provider(uri):
providers = {'gs': job_model.P_GCS, 'file': job_model.P_LOCAL}
provider_found = re.match(r'^([A-Za-z][A-Za-z0-9+.-]{0,29})://', uri)
if provider_found:
prefix = provider_found.group(1).lower()
else:
prefix = 'file'
if prefix in providers:
return providers[prefix]
else:
raise ValueError('File prefix not supported: %s://' % prefix)
|
Find the file provider for a URI.
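A brief sketch of the mapping (bucket and paths are illustrative; P_GCS and P_LOCAL are the job_model constants referenced above):
>>> parse_file_provider('gs://my-bucket/input.txt') == job_model.P_GCS
True
>>> parse_file_provider('data/input.txt') == job_model.P_LOCAL   # no scheme -> 'file'
True
>>> parse_file_provider('http://example.com/x')
Traceback (most recent call last):
    ...
ValueError: File prefix not supported: http://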
|
def _make_user_class(session, name):
attrs = session.eval('fieldnames(%s);' % name, nout=1).ravel().tolist()
methods = session.eval('methods(%s);' % name, nout=1).ravel().tolist()
ref = weakref.ref(session)
doc = _DocDescriptor(ref, name)
values = dict(__doc__=doc, _name=name, _ref=ref, _attrs=attrs,
__module__='oct2py.dynamic')
for method in methods:
doc = _MethodDocDescriptor(ref, name, method)
cls_name = '%s_%s' % (name, method)
method_values = dict(__doc__=doc)
method_cls = type(str(cls_name),
(OctaveUserClassMethod,), method_values)
values[method] = method_cls(ref, method, name)
for attr in attrs:
values[attr] = OctaveUserClassAttr(ref, attr, attr)
return type(str(name), (OctaveUserClass,), values)
|
Make an Octave class for a given class name
|
def put(self, credentials):
self.acquire_lock()
try:
self.locked_put(credentials)
finally:
self.release_lock()
|
Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
|
def getobjectsize(self, window_name, object_name=None):
if not object_name:
handle, name, app = self._get_window_handle(window_name)
else:
handle = self._get_object_handle(window_name, object_name)
return self._getobjectsize(handle)
|
Get object size
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob. Or menu hierarchy
@type object_name: string
@return: x, y, width, height on success.
@rtype: list
|
def reload_programs(self):
print("Reloading programs:")
for name, program in self._programs.items():
if getattr(program, 'program', None):
print(" - {}".format(program.meta.label))
program.program = resources.programs.load(program.meta)
|
Reload all shader programs with the reloadable flag set
|
def crown(self, depth=2):
nodes = []
for node in self.leaves: nodes += node.flatten(depth-1)
return cluster.unique(nodes)
|
Returns a list of leaves, nodes connected to leaves, etc.
|
def run_metrics(command, parser, cl_args, unknown_args):
cluster, role, env = cl_args['cluster'], cl_args['role'], cl_args['environ']
topology = cl_args['topology-name']
try:
result = tracker_access.get_topology_info(cluster, env, topology, role)
spouts = result['physical_plan']['spouts'].keys()
bolts = result['physical_plan']['bolts'].keys()
components = spouts + bolts
cname = cl_args['component']
if cname:
if cname in components:
components = [cname]
else:
Log.error('Unknown component: \'%s\'' % cname)
raise
except Exception:
Log.error("Fail to connect to tracker: \'%s\'", cl_args["tracker_url"])
return False
cresult = []
for comp in components:
try:
metrics = tracker_access.get_component_metrics(comp, cluster, env, topology, role)
except:
Log.error("Fail to connect to tracker: \'%s\'", cl_args["tracker_url"])
return False
stat, header = to_table(metrics)
cresult.append((comp, stat, header))
for i, (comp, stat, header) in enumerate(cresult):
if i != 0:
print('')
print('\'%s\' metrics:' % comp)
print(tabulate(stat, headers=header))
return True
|
run metrics subcommand
|
def guid(self, guid):
return self._json(self._get(self._build_url('guids', guid)), 200)['data']['type']
|
Determines JSONAPI type for provided GUID
|
def update(self, dicomset):
if not isinstance(dicomset, DicomFileSet):
raise ValueError('Given dicomset is not a DicomFileSet.')
self.items = list(set(self.items).union(dicomset))
|
Update this set with the union of itself and dicomset.
Parameters
----------
dicomset: DicomFileSet
|
def condense_ranges(cls, ranges):
result = []
if ranges:
ranges.sort(key=lambda tup: tup[0])
result.append(ranges[0])
for i in range(1, len(ranges)):
if result[-1][1] + 1 >= ranges[i][0]:
result[-1] = (result[-1][0], max(result[-1][1], ranges[i][1]))
else:
result.append(ranges[i])
return result
|
Sorts and removes overlaps
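A small illustration of the merge behaviour (ranges are inclusive, and adjacent ranges are merged too because of the ``+ 1``). Shown as a plain call for brevity, although it is a classmethod in the source:
>>> condense_ranges([(5, 10), (1, 3), (4, 7)])
[(1, 10)]
>>> condense_ranges([(1, 2), (5, 6)])
[(1, 2), (5, 6)]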
|
def _get_password_url(self):
password_url = None
if self._settings["user"] or self._settings["authorization"]:
if self._settings["url"]:
password_url = self._settings["url"]
elif self._settings["base_url"]:
password_url = self._settings["base_url"]
return password_url
|
Get URL used for authentication
Returns:
string: URL
|
def handle(self, *args, **options):
LOGGER.info('Starting assigning enterprise roles to users!')
role = options['role']
if role == ENTERPRISE_ADMIN_ROLE:
self._assign_enterprise_role_to_users(self._get_enterprise_admin_users_batch, options)
elif role == ENTERPRISE_OPERATOR_ROLE:
self._assign_enterprise_role_to_users(self._get_enterprise_operator_users_batch, options)
elif role == ENTERPRISE_LEARNER_ROLE:
self._assign_enterprise_role_to_users(self._get_enterprise_customer_users_batch, options)
elif role == ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE:
self._assign_enterprise_role_to_users(self._get_enterprise_enrollment_api_admin_users_batch, options, True)
elif role == ENTERPRISE_CATALOG_ADMIN_ROLE:
self._assign_enterprise_role_to_users(self._get_enterprise_catalog_admin_users_batch, options, True)
else:
raise CommandError('Please provide a valid role name. Supported roles are {admin} and {learner}'.format(
admin=ENTERPRISE_ADMIN_ROLE,
learner=ENTERPRISE_LEARNER_ROLE
))
LOGGER.info('Successfully finished assigning enterprise roles to users!')
|
Entry point for management command execution.
|
def _ignore_path(cls, path, ignore_list=None, white_list=None):
ignore_list = ignore_list or []
white_list = white_list or []
return (cls._matches_patterns(path, ignore_list) and
not cls._matches_patterns(path, white_list))
|
Returns whether a path should be ignored or not.
|
def _get(self, *args, **kwargs):
messages, all_retrieved = super(StorageMixin, self)._get(*args, **kwargs)
if self.user.is_authenticated():
inbox_messages = self.backend.inbox_list(self.user)
else:
inbox_messages = []
return messages + inbox_messages, all_retrieved
|
Retrieve unread messages for current user, both from the inbox and
from other storages
|
def _dump_field(self, fd):
v = {}
v['label'] = Pbd.LABELS[fd.label]
v['type'] = fd.type_name if len(fd.type_name) > 0 else Pbd.TYPES[fd.type]
v['name'] = fd.name
v['number'] = fd.number
v['default'] = '[default = {}]'.format(fd.default_value) if len(fd.default_value) > 0 else ''
f = '{label} {type} {name} = {number} {default};'.format(**v)
f = ' '.join(f.split())
self._print(f)
if len(fd.type_name) > 0:
self.uses.append(fd.type_name)
|
Dump single field.
|
def set_pkg_license_declared(self, doc, lic):
self.assert_package_exists()
if not self.package_license_declared_set:
self.package_license_declared_set = True
if validations.validate_lics_conc(lic):
doc.package.license_declared = lic
return True
else:
raise SPDXValueError('Package::LicenseDeclared')
else:
raise CardinalityError('Package::LicenseDeclared')
|
Sets the package's declared license.
Raises SPDXValueError if data malformed.
Raises OrderError if no package previously defined.
Raises CardinalityError if already set.
|
def get_top_long_short_abs(positions, top=10):
positions = positions.drop('cash', axis='columns')
df_max = positions.max()
df_min = positions.min()
df_abs_max = positions.abs().max()
df_top_long = df_max[df_max > 0].nlargest(top)
df_top_short = df_min[df_min < 0].nsmallest(top)
df_top_abs = df_abs_max.nlargest(top)
return df_top_long, df_top_short, df_top_abs
|
Finds the top long, short, and absolute positions.
Parameters
----------
positions : pd.DataFrame
The positions that the strategy takes over time.
top : int, optional
How many of each to find (default 10).
Returns
-------
df_top_long : pd.DataFrame
Top long positions.
df_top_short : pd.DataFrame
Top short positions.
df_top_abs : pd.DataFrame
Top absolute positions.
|
def calculate_subgraph_edge_overlap(
graph: BELGraph,
annotation: str = 'Subgraph'
) -> Tuple[
Mapping[str, EdgeSet],
Mapping[str, Mapping[str, EdgeSet]],
Mapping[str, Mapping[str, EdgeSet]],
Mapping[str, Mapping[str, float]],
]:
sg2edge = defaultdict(set)
for u, v, d in graph.edges(data=True):
if not edge_has_annotation(d, annotation):
continue
sg2edge[d[ANNOTATIONS][annotation]].add((u, v))
subgraph_intersection = defaultdict(dict)
subgraph_union = defaultdict(dict)
result = defaultdict(dict)
for sg1, sg2 in itt.product(sg2edge, repeat=2):
subgraph_intersection[sg1][sg2] = sg2edge[sg1] & sg2edge[sg2]
subgraph_union[sg1][sg2] = sg2edge[sg1] | sg2edge[sg2]
result[sg1][sg2] = len(subgraph_intersection[sg1][sg2]) / len(subgraph_union[sg1][sg2])
return sg2edge, subgraph_intersection, subgraph_union, result
|
Build a DataFrame to show the overlap between different sub-graphs.
Options:
1. Total number of edges overlap (intersection)
2. Percentage overlap (tanimoto similarity)
:param graph: A BEL graph
:param annotation: The annotation to group by and compare. Defaults to 'Subgraph'
:return: {subgraph: set of edges}, {(subgraph 1, subgraph2): set of intersecting edges},
{(subgraph 1, subgraph2): set of unioned edges}, {(subgraph 1, subgraph2): tanimoto similarity},
|
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
|
Iterates over a generator looking for things that match.
|
def centerdc_2_twosided(data):
N = len(data)
newpsd = np.concatenate((data[N//2:], (cshift(data[0:N//2], -1))))
return newpsd
|
Convert a center-dc PSD to a twosided PSD
|
def course_key_is_valid(course_key):
if course_key is None:
return False
try:
CourseKey.from_string(text_type(course_key))
except (InvalidKeyError, UnicodeDecodeError):
return False
return True
|
Course key object validation
|
def create(self, data):
self.app_id = None
if 'client_id' not in data:
raise KeyError('The authorized app must have a client_id')
if 'client_secret' not in data:
raise KeyError('The authorized app must have a client_secret')
return self._mc_client._post(url=self._build_path(), data=data)
|
Retrieve OAuth2-based credentials to associate API calls with your
application.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"client_id": string*,
"client_secret": string*
}
|
def can(obj):
import_needed = False
for cls, canner in iteritems(can_map):
if isinstance(cls, string_types):
import_needed = True
break
elif istype(obj, cls):
return canner(obj)
if import_needed:
_import_mapping(can_map, _original_can_map)
return can(obj)
return obj
|
Prepare an object for pickling.
|
def _create_folder(local_folder, parent_folder_id):
new_folder = session.communicator.create_folder(
session.token, os.path.basename(local_folder), parent_folder_id)
return new_folder['folder_id']
|
Function for creating a remote folder and returning the id. This should be
a building block for user-level functions.
:param local_folder: full path to a local folder
:type local_folder: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the new folder will be added
:type parent_folder_id: int | long
:returns: id of the remote folder that was created
:rtype: int | long
|
def joint_torques(self):
return as_flat_array(getattr(j, 'amotor', j).feedback[-1][:j.ADOF]
for j in self.joints)
|
Get a list of all current joint torques in the skeleton.
|
def get_heron_dir():
go_above_dirs = 9
path = "/".join(os.path.realpath(__file__).split('/')[:-go_above_dirs])
return normalized_class_path(path)
|
This will extract heron directory from .pex file.
For example,
when __file__ is '/Users/heron-user/bin/heron/heron/tools/common/src/python/utils/config.pyc', and
its real path is '/Users/heron-user/.heron/bin/heron/tools/common/src/python/utils/config.pyc',
the internal variable ``path`` would be '/Users/heron-user/.heron', which is the heron directory
This means the variable `go_above_dirs` below is 9.
:return: root location of the .pex file
|
def queue(users, label, extra_context=None, sender=None):
if extra_context is None:
extra_context = {}
if isinstance(users, QuerySet):
users = [row["pk"] for row in users.values("pk")]
else:
users = [user.pk for user in users]
notices = []
for user in users:
notices.append((user, label, extra_context, sender))
NoticeQueueBatch(pickled_data=base64.b64encode(pickle.dumps(notices))).save()
|
Queue the notification in NoticeQueueBatch. This allows for large amounts
of user notifications to be deferred to a separate process running outside
the webserver.
|
def get_doc(doc_id, db_name, server_url='http://127.0.0.1:5984/', rev=None):
db = get_server(server_url)[db_name]
if rev:
headers, response = db.resource.get(doc_id, rev=rev)
return couchdb.client.Document(response)
return db[doc_id]
|
Return a CouchDB document, given its ID, revision and database name.
|
def enroll_users_in_course(cls, enterprise_customer, course_id, course_mode, emails):
existing_users, unregistered_emails = cls.get_users_by_email(emails)
successes = []
pending = []
failures = []
for user in existing_users:
succeeded = cls.enroll_user(enterprise_customer, user, course_mode, course_id)
if succeeded:
successes.append(user)
else:
failures.append(user)
for email in unregistered_emails:
pending_user = enterprise_customer.enroll_user_pending_registration(
email,
course_mode,
course_id
)
pending.append(pending_user)
return successes, pending, failures
|
Enroll existing users in a course, and create a pending enrollment for nonexisting users.
Args:
enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment
course_id (str): The unique identifier of the course in which we're enrolling
course_mode (str): The mode with which we're enrolling in the course
emails: An iterable of email addresses which need to be enrolled
Returns:
successes: A list of users who were successfully enrolled in the course
pending: A list of PendingEnterpriseCustomerUsers who were successfully linked and had
pending enrollments created for them in the database
failures: A list of users who could not be enrolled in the course
|
def _get_const_info(const_index, const_list):
argval = const_index
if const_list is not None:
try:
argval = const_list[const_index]
except IndexError:
raise ValidationError("Consts value out of range: {}".format(const_index)) from None
return argval, repr(argval)
|
Helper to get optional details about const references
Returns the dereferenced constant and its repr if the constant
list is defined.
Otherwise returns the constant index and its repr().
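A short sketch of both branches:
>>> _get_const_info(1, ('spam', 'eggs'))
('eggs', "'eggs'")
>>> _get_const_info(1, None)
(1, '1')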
|
def isodate(datestamp=None, microseconds=False):
datestamp = datestamp or datetime.datetime.now()
if not microseconds:
usecs = datetime.timedelta(microseconds=datestamp.microsecond)
datestamp = datestamp - usecs
return datestamp.isoformat(b' ' if PY2 else u' ')
|
Return current or given time formatted according to ISO-8601.
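For example, assuming Python 3 (so the separator is the unicode space):
>>> import datetime
>>> isodate(datetime.datetime(2021, 6, 1, 12, 30, 0, 123456))
'2021-06-01 12:30:00'
>>> isodate(datetime.datetime(2021, 6, 1, 12, 30, 0, 123456), microseconds=True)
'2021-06-01 12:30:00.123456'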
|
def dist_baystat(src, tar, min_ss_len=None, left_ext=None, right_ext=None):
return Baystat().dist(src, tar, min_ss_len, left_ext, right_ext)
|
Return the Baystat distance.
This is a wrapper for :py:meth:`Baystat.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
min_ss_len : int
Minimum substring length to be considered
left_ext : int
Left-side extension length
right_ext : int
Right-side extension length
Returns
-------
float
The Baystat distance
Examples
--------
>>> round(dist_baystat('cat', 'hat'), 12)
0.333333333333
>>> dist_baystat('Niall', 'Neil')
0.6
>>> round(dist_baystat('Colin', 'Cuilen'), 12)
0.833333333333
>>> dist_baystat('ATCG', 'TAGC')
1.0
|
def dropbox_submission(dropbox, request):
try:
data = dropbox_schema.deserialize(request.POST)
except Exception:
return HTTPFound(location=request.route_url('dropbox_form'))
dropbox.message = data.get('message')
if 'testing_secret' in dropbox.settings:
dropbox.from_watchdog = is_equal(
unicode(dropbox.settings['test_submission_secret']),
data.pop('testing_secret', u''))
if data.get('upload') is not None:
dropbox.add_attachment(data['upload'])
dropbox.submit()
drop_url = request.route_url('dropbox_view', drop_id=dropbox.drop_id)
print("Created dropbox %s" % drop_url)
return HTTPFound(location=drop_url)
|
handles the form submission, redirects to the dropbox's status page.
|
def encode_body(req):
if isinstance(req.body, text_type):
split = req.headers.get('content-type', 'text/plain').split(';')
if len(split) == 2:
ct, cs = split
cs = cs.split('=')[1]
req.body = req.body.encode(cs)
else:
ct = split[0]
if (ct == 'application/x-www-form-urlencoded' or
'x-amz-' in ct):
req.body = req.body.encode()
else:
req.body = req.body.encode('utf-8')
req.headers['content-type'] = ct + '; charset=utf-8'
|
Encode body of request to bytes and update content-type if required.
If the body of req is Unicode then encode to the charset found in
content-type header if present, otherwise UTF-8, or ASCII if
content-type is application/x-www-form-urlencoded. If encoding to UTF-8
then add charset to content-type. Modifies req directly, does not
return a modified copy.
req -- Requests PreparedRequest object
|
def rows_after(self):
rows_after = []
for mesh in self.produced_meshes:
if mesh.is_consumed():
row = mesh.consuming_row
if row not in rows_after:
rows_after.append(row)
return rows_after
|
The rows that consume meshes from this row.
:rtype: list
:return: a list of rows that consume meshes from this row. Each row
occurs only once. They are sorted by the first occurrence in the
instructions.
|
def get_suggested_filename(metadata):
if metadata.get('title') and metadata.get('track_number'):
suggested_filename = '{track_number:0>2} {title}'.format(**metadata)
elif metadata.get('title') and metadata.get('trackNumber'):
suggested_filename = '{trackNumber:0>2} {title}'.format(**metadata)
elif metadata.get('title') and metadata.get('tracknumber'):
suggested_filename = '{tracknumber:0>2} {title}'.format(**metadata)
else:
suggested_filename = '00 {}'.format(metadata.get('title', ''))
return suggested_filename
|
Generate a filename for a song based on metadata.
Parameters:
metadata (dict): A metadata dict.
Returns:
A filename.
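A couple of illustrative cases (the song title is made up):
>>> get_suggested_filename({'title': 'Heartbeats', 'track_number': 3})
'03 Heartbeats'
>>> get_suggested_filename({'title': 'Heartbeats'})
'00 Heartbeats'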
|
def merge_images(images, axis='t'):
if not images:
return None
axis_dim = {'x': 0,
'y': 1,
'z': 2,
't': 3,
}
if axis not in axis_dim:
raise ValueError('Expected `axis` to be one of ({}), got {}.'.format(set(axis_dim.keys()), axis))
img1 = images[0]
for img in images:
check_img_compatibility(img1, img)
image_data = []
for img in images:
image_data.append(check_img(img).get_data())
work_axis = axis_dim[axis]
ndim = image_data[0].ndim
if ndim - 1 < work_axis:
image_data = [np.expand_dims(img, axis=work_axis) for img in image_data]
return np.concatenate(image_data, axis=work_axis)
|
Concatenate `images` in the direction determined in `axis`.
Parameters
----------
images: list of str or img-like object.
See NeuroImage constructor docstring.
axis: str
't' : concatenate images in time
'x' : concatenate images in the x direction
'y' : concatenate images in the y direction
'z' : concatenate images in the z direction
Returns
-------
merged: img-like object
|
def connect(self):
if not self.connected():
self._ws = create_connection(self.WS_URI)
message = {
'type':self.WS_TYPE,
'product_id':self.WS_PRODUCT_ID
}
self._ws.send(dumps(message))
with self._lock:
if not self._thread:
thread = Thread(target=self._keep_alive_thread, args=[])
thread.start()
|
Connects and subscribes to the WebSocket Feed.
|
def finalizeOp(self, ops, account, permission, **kwargs):
if "append_to" in kwargs and kwargs["append_to"]:
if self.proposer:
log.warning(
"You may not use append_to and self.proposer at "
"the same time. Append new_proposal(..) instead"
)
append_to = kwargs["append_to"]
parent = append_to.get_parent()
assert isinstance(
append_to, (self.transactionbuilder_class, self.proposalbuilder_class)
)
append_to.appendOps(ops)
if isinstance(append_to, self.proposalbuilder_class):
parent.appendSigner(append_to.proposer, permission)
else:
parent.appendSigner(account, permission)
return append_to.get_parent()
elif self.proposer:
proposal = self.proposal()
proposal.set_proposer(self.proposer)
proposal.set_expiration(self.proposal_expiration)
proposal.set_review(self.proposal_review)
proposal.appendOps(ops)
else:
self.txbuffer.appendOps(ops)
if "fee_asset" in kwargs and kwargs["fee_asset"]:
self.txbuffer.set_fee_asset(kwargs["fee_asset"])
if self.unsigned:
self.txbuffer.addSigningInformation(account, permission)
return self.txbuffer
elif self.bundle:
self.txbuffer.appendSigner(account, permission)
return self.txbuffer.json()
else:
self.txbuffer.appendSigner(account, permission)
self.txbuffer.sign()
return self.txbuffer.broadcast()
|
This method obtains the required private keys if present in
the wallet, finalizes the transaction, signs it and
broadcasts it
:param operation ops: The operation (or list of operations) to
broadcast
:param operation account: The account that authorizes the
operation
:param string permission: The required permission for
signing (active, owner, posting)
:param object append_to: This allows to provide an instance of
ProposalsBuilder (see :func:`new_proposal`) or
TransactionBuilder (see :func:`new_tx()`) to specify
where to put a specific operation.
.. note:: ``append_to`` is exposed to every method used in
this class
.. note::
If ``ops`` is a list of operations, they all need to be
signable by the same key! Thus, you cannot combine ops
that require active permission with ops that require
posting permission. Neither can you use different
accounts for different operations!
.. note:: This uses ``txbuffer`` as an instance of
:class:`transactionbuilder.TransactionBuilder`.
You may want to use your own txbuffer
|
def org_by_name(self, hostname):
addr = self._gethostbyname(hostname)
return self.org_by_addr(addr)
|
Returns Organization, ISP, or ASNum name for given hostname.
:arg hostname: Hostname (e.g. example.com)
|
def human_transactions(self):
txs = []
for tx in self.transactions:
if tx.depth == 0:
txs.append(tx)
return tuple(txs)
|
Completed human transactions
|
def issue_line_with_user(self, line, issue):
if not issue.get("pull_request") or not self.options.author:
return line
if not issue.get("user"):
line += u" (Null user)"
elif self.options.username_as_tag:
line += u" (@{0})".format(
issue["user"]["login"]
)
else:
line += u" ([{0}]({1}))".format(
issue["user"]["login"], issue["user"]["html_url"]
)
return line
|
If option author is enabled, a link to the profile of the author
of the pull request will be added to the issue line.
:param str line: String containing a markdown-formatted single issue.
:param dict issue: Fetched issue from GitHub.
:rtype: str
:return: Issue line with added author link.
|
def _get_children_as_string(node):
out = []
if node:
for child in node:
if child.nodeType == child.TEXT_NODE:
out.append(child.data)
else:
out.append(_get_children_as_string(child.childNodes))
return ''.join(out)
|
Iterate through all the children of a node.
Returns one string containing the values from all the text-nodes
recursively.
|
def _import_mapping(mapping, original=None):
for key, value in list(mapping.items()):
if isinstance(key, string_types):
try:
cls = import_item(key)
except Exception:
if original and key not in original:
print("ERROR: canning class not importable: %r", key, exc_info=True)
mapping.pop(key)
else:
mapping[cls] = mapping.pop(key)
|
Import any string-keys in a type mapping.
|
def init_raspbian_vm(self):
r = self.local_renderer
r.comment('Installing system packages.')
r.sudo('add-apt-repository ppa:linaro-maintainers/tools')
r.sudo('apt-get update')
r.sudo('apt-get install libsdl-dev qemu-system')
r.comment('Download image.')
r.local('wget https://downloads.raspberrypi.org/raspbian_lite_latest')
r.local('unzip raspbian_lite_latest.zip')
r.comment('Find start of the Linux ext4 partition.')
r.local(
"parted -s 2016-03-18-raspbian-jessie-lite.img unit B print | "
"awk '/^Number/{{p=1;next}}; p{{gsub(/[^[:digit:]]/, "", $2); print $2}}' | sed -n 2p", assign_to='START')
r.local('mkdir -p {raspbian_mount_point}')
r.sudo('mount -v -o offset=$START -t ext4 {raspbian_image} $MNT')
r.comment('Comment out everything in ld.so.preload')
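# NOTE: the sed command below was cut off at the '#' character in this copy;
# the full command prefixes each line of ld.so.preload on the mounted image with '#'.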
r.local("sed -i 's/^/
r.comment('Comment out entries containing /dev/mmcblk in fstab.')
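# NOTE: likewise truncated at '#'; the full command prefixes the matching
# fstab entries on the mounted image with '#'.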
r.local("sed -i '/mmcblk/ s?^?
r.sudo('umount {raspbian_mount_point}')
r.comment('Download kernel.')
r.local('wget https://github.com/dhruvvyas90/qemu-rpi-kernel/blob/master/{raspbian_kernel}?raw=true')
r.local('mv {raspbian_kernel} {libvirt_images_dir}')
r.comment('Creating libvirt machine.')
r.local('virsh define libvirt-raspbian.xml')
r.comment('You should now be able to boot the VM by running:')
r.comment('')
r.comment(' qemu-system-arm -kernel {libvirt_boot_dir}/{raspbian_kernel} '
'-cpu arm1176 -m 256 -M versatilepb -serial stdio -append "root=/dev/sda2 rootfstype=ext4 rw" '
'-hda {libvirt_images_dir}/{raspbian_image}')
r.comment('')
r.comment('Or by running virt-manager.')
|
Creates an image for running Raspbian in a QEMU virtual machine.
Based on the guide at:
https://github.com/dhruvvyas90/qemu-rpi-kernel/wiki/Emulating-Jessie-image-with-4.1.x-kernel
|
def read_chunk(filename, offset=-1, length=-1, escape_data=False):
try:
length = int(length)
offset = int(offset)
except ValueError:
return {}
if not os.path.isfile(filename):
return {}
try:
fstat = os.stat(filename)
except Exception:
return {}
if offset == -1:
offset = fstat.st_size
if length == -1:
length = fstat.st_size - offset
with open(filename, "r") as fp:
fp.seek(offset)
try:
data = fp.read(length)
except IOError:
return {}
if data:
data = _escape_data(data) if escape_data else data
return dict(offset=offset, length=len(data), data=data)
return dict(offset=offset, length=0)
|
Read a chunk of a file, starting at an offset and reading up to the given length.
|
def find_matching(cls, path, patterns):
for pattern in patterns:
if pattern.match(path):
yield pattern
|
Yield all matching patterns for path.
|
def export(self):
graph = nx.MultiDiGraph()
regions = self.network.getRegions()
for idx in xrange(regions.getCount()):
regionPair = regions.getByIndex(idx)
regionName = regionPair[0]
graph.add_node(regionName, label=regionName)
for linkName, link in self.network.getLinks():
graph.add_edge(link.getSrcRegionName(),
link.getDestRegionName(),
src=link.getSrcOutputName(),
dest=link.getDestInputName())
return graph
|
Exports a network as a networkx MultiDiGraph intermediate representation
suitable for visualization.
:return: networkx MultiDiGraph
|
def mapCellsToColumns(self, cells):
cellsForColumns = defaultdict(set)
for cell in cells:
column = self.columnForCell(cell)
cellsForColumns[column].add(cell)
return cellsForColumns
|
Maps cells to the columns they belong to.
:param cells: (set) Cells
:returns: (dict) Mapping from columns to their cells in `cells`
|
def get_notification_subject_line(course_name, template_configuration=None):
stock_subject_template = _('You\'ve been enrolled in {course_name}!')
default_subject_template = getattr(
settings,
'ENTERPRISE_ENROLLMENT_EMAIL_DEFAULT_SUBJECT_LINE',
stock_subject_template,
)
if template_configuration is not None and template_configuration.subject_line:
final_subject_template = template_configuration.subject_line
else:
final_subject_template = default_subject_template
try:
return final_subject_template.format(course_name=course_name)
except KeyError:
pass
try:
return default_subject_template.format(course_name=course_name)
except KeyError:
return stock_subject_template.format(course_name=course_name)
|
Get a subject line for a notification email.
The method is designed to fail in a "smart" way; if we can't render a
database-backed subject line template, then we'll fall back to a template
saved in the Django settings; if we can't render _that_ one, then we'll
fall through to a friendly string written into the code.
One example of a failure case in which we want to fall back to a stock template
would be if a site admin entered a subject line string that contained a template
tag that wasn't available, causing a KeyError to be raised.
Arguments:
course_name (str): Course name to be rendered into the string
template_configuration: A database-backed object with a stored subject line template
|
def wheel_helper(pos, length, cycle_step):
return wheel_color((pos * len(_WHEEL) / length) + cycle_step)
|
Helper for wheel_color that distributes colors over length and
allows shifting position.
|
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
|
Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name.
|
def is_empty(self):
if len(self.val) != 0:
if isinstance(self.val, str_types):
self._err('Expected <%s> to be empty string, but was not.' % self.val)
else:
self._err('Expected <%s> to be empty, but was not.' % self.val)
return self
|
Asserts that val is empty.
|
def _reproject(self, p):
nulls = self.problem.nullspace
equalities = self.problem.equalities
if np.allclose(equalities.dot(p), self.problem.b,
rtol=0, atol=self.feasibility_tol):
new = p
else:
LOGGER.info("feasibility violated in sample"
" %d, trying to reproject" % self.n_samples)
new = nulls.dot(nulls.T.dot(p))
if any(new != p):
LOGGER.info("reprojection failed in sample"
" %d, using random point in space" % self.n_samples)
new = self._random_point()
return new
|
Reproject a point into the feasibility region.
This function is guaranteed to return a new feasible point. However,
no guarantees in terms of proximity to the original point can be made.
Parameters
----------
p : numpy.array
The current sample point.
Returns
-------
numpy.array
A new feasible point. If `p` was feasible it will return `p`.
|
def add_tags(self, tags):
return self.get_data(
"firewalls/%s/tags" % self.id,
type=POST,
params={"tags": tags}
)
|
Add tags to this Firewall.
|
def get_md_status(self, line):
ret = {}
splitted = split('\W+', line)
if len(splitted) < 7:
ret['available'] = None
ret['used'] = None
ret['config'] = None
else:
ret['available'] = splitted[-4]
ret['used'] = splitted[-3]
ret['config'] = splitted[-2]
return ret
|
Return a dict of md status define in the line.
|
def NormalizePath(path):
if path.endswith('/') or path.endswith('\\'):
slash = os.path.sep
else:
slash = ''
return os.path.normpath(path) + slash
|
Normalizes a path maintaining the final slashes.
Some environment variables need the final slash in order to work.
Ex. The SOURCES_DIR set by subversion must end with a slash because of the way it is used
in the Visual Studio projects.
:param unicode path:
The path to normalize.
:rtype: unicode
:returns:
Normalized path
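Illustrative behaviour on a POSIX system (where os.path.sep is '/'):
>>> NormalizePath('foo//bar/../baz/')
'foo/baz/'
>>> NormalizePath('foo//bar/../baz')
'foo/baz'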
|
def numeric_task_id(task_id):
if task_id is not None:
if task_id.startswith('task-'):
return int(task_id[len('task-'):])
else:
return int(task_id)
|
Converts a task-id to the numeric task-id.
Args:
task_id: task-id in either task-n or n format
Returns:
n
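For example:
>>> numeric_task_id('task-7')
7
>>> numeric_task_id('7')
7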
|
def report_read_counts(self, filename, grp_wise=False, reorder='as-is', notes=None):
expected_read_counts = self.probability.sum(axis=APM.Axis.READ)
if grp_wise:
lname = self.probability.gname
expected_read_counts = expected_read_counts * self.grp_conv_mat
else:
lname = self.probability.lname
total_read_counts = expected_read_counts.sum(axis=0)
if reorder == 'decreasing':
report_order = np.argsort(total_read_counts.flatten())
report_order = report_order[::-1]
elif reorder == 'increasing':
report_order = np.argsort(total_read_counts.flatten())
elif reorder == 'as-is':
report_order = np.arange(len(lname))
cntdata = np.vstack((expected_read_counts, total_read_counts))
fhout = open(filename, 'w')
fhout.write("locus\t" + "\t".join(self.probability.hname) + "\ttotal")
if notes is not None:
fhout.write("\tnotes")
fhout.write("\n")
for locus_id in report_order:
lname_cur = lname[locus_id]
fhout.write("\t".join([lname_cur] + map(str, cntdata[:, locus_id].ravel())))
if notes is not None:
fhout.write("\t%s" % notes[lname_cur])
fhout.write("\n")
fhout.close()
|
Exports expected read counts
:param filename: File name for output
:param grp_wise: whether the report is at isoform level or gene level
:param reorder: whether the report should be either 'decreasing' or 'increasing' order or just 'as-is'
:return: Nothing but the method writes a file
|
def get_repr(self, obj, referent=None):
objtype = type(obj)
typename = str(objtype.__module__) + "." + objtype.__name__
prettytype = typename.replace("__builtin__.", "")
name = getattr(obj, "__name__", "")
if name:
prettytype = "%s %r" % (prettytype, name)
key = ""
if referent:
key = self.get_refkey(obj, referent)
url = reverse('dowser_trace_object', args=(
typename,
id(obj)
))
return ('<a class="objectid" href="%s">%s</a> '
'<span class="typename">%s</span>%s<br />'
'<span class="repr">%s</span>'
% (url, id(obj), prettytype, key, get_repr(obj, 100))
)
|
Return an HTML tree block describing the given object.
|
def _adaptToResource(self, result):
if result is None:
return NotFound()
spinneretResource = ISpinneretResource(result, None)
if spinneretResource is not None:
return SpinneretResource(spinneretResource)
renderable = IRenderable(result, None)
if renderable is not None:
return _RenderableResource(renderable)
resource = IResource(result, None)
if resource is not None:
return resource
if isinstance(result, URLPath):
return Redirect(str(result))
return result
|
Adapt a result to `IResource`.
Several adaptations are tried; they are, in order: ``None``,
`IRenderable <twisted:twisted.web.iweb.IRenderable>`, `IResource
<twisted:twisted.web.resource.IResource>`, and `URLPath
<twisted:twisted.python.urlpath.URLPath>`. Anything else is returned as
is.
A `URLPath <twisted:twisted.python.urlpath.URLPath>` is treated as
a redirect.
|
def textpath(self, txt, x, y, width=None, height=1000000, enableRendering=False, **kwargs):
txt = self.Text(txt, x, y, width, height, **kwargs)
path = txt.path
if enableRendering:
path.draw()
return path
|
Draws an outlined path of the input text
|
def anomalyRemoveLabels(self, start, end, labelFilter):
self._getAnomalyClassifier().getSelf().removeLabels(start, end, labelFilter)
|
Remove labels from the anomaly classifier within this model. Removes all
records if ``labelFilter==None``, otherwise only removes the labels equal to
``labelFilter``.
:param start: (int) index to start removing labels
:param end: (int) index to end removing labels
:param labelFilter: (string) If specified, only removes records that match
|
def iterwindows(self, count=64, window_shape=(256, 256)):
if count is None:
while True:
yield self.randwindow(window_shape)
else:
for i in xrange(count):
yield self.randwindow(window_shape)
|
Iterate over random windows of an image
Args:
count (int): the number of the windows to generate. Defaults to 64, if `None` will continue to iterate over random windows until stopped.
window_shape (tuple): The desired shape of each image as (height, width) in pixels.
Yields:
image: an image of the given shape and same type.
|
def proto_04_01_MTmon70s2(abf=exampleABF):
standard_inspect(abf)
swhlab.memtest.memtest(abf)
swhlab.memtest.checkSweep(abf)
swhlab.plot.save(abf,tag='check',resize=False)
swhlab.memtest.plot_standard4(abf)
swhlab.plot.save(abf,tag='memtests')
|
repeated membrane tests, likely with drug added. Maybe IPSCs.
|
def getTopologyByClusterRoleEnvironAndName(self, cluster, role, environ, topologyName):
topologies = list(filter(lambda t: t.name == topologyName
and t.cluster == cluster
and (not role or t.execution_state.role == role)
and t.environ == environ, self.topologies))
if not topologies or len(topologies) > 1:
if role is not None:
raise Exception("Topology not found for {0}, {1}, {2}, {3}".format(
cluster, role, environ, topologyName))
else:
raise Exception("Topology not found for {0}, {1}, {2}".format(
cluster, environ, topologyName))
return topologies[0]
|
Find and return the topology given its cluster, environ, topology name, and
an optional role.
Raises exception if topology is not found, or more than one are found.
|
def hierarchy_flatten(annotation):
intervals, values = annotation.to_interval_values()
ordering = dict()
for interval, value in zip(intervals, values):
level = value['level']
if level not in ordering:
ordering[level] = dict(intervals=list(), labels=list())
ordering[level]['intervals'].append(interval)
ordering[level]['labels'].append(value['label'])
levels = sorted(list(ordering.keys()))
hier_intervals = [ordering[level]['intervals'] for level in levels]
hier_labels = [ordering[level]['labels'] for level in levels]
return hier_intervals, hier_labels
|
Flatten a multi_segment annotation into mir_eval style.
Parameters
----------
annotation : jams.Annotation
An annotation in the `multi_segment` namespace
Returns
-------
hier_intervals : list
A list of lists of intervals, ordered by increasing specificity.
hier_labels : list
A list of lists of labels, ordered by increasing specificity.
|