code | docstring
|---|---|
def bohachevsky1(theta):
x, y = theta
obj = x ** 2 + 2 * y ** 2 - 0.3 * np.cos(3 * np.pi * x) - 0.4 * np.cos(4 * np.pi * y) + 0.7
grad = np.array([
2 * x + 0.3 * np.sin(3 * np.pi * x) * 3 * np.pi,
4 * y + 0.4 * np.sin(4 * np.pi * y) * 4 * np.pi,
])
return obj, grad
|
One of the Bohachevsky functions
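Illustrative check (not part of the original snippet): the analytic gradient can be compared against a central finite difference; assumes numpy is imported as np.

    import numpy as np
    theta = np.array([0.3, -0.2])
    obj, grad = bohachevsky1(theta)
    eps = 1e-6
    # finite-difference estimate of d(obj)/dx at theta
    fd_x = (bohachevsky1(theta + [eps, 0.0])[0] - bohachevsky1(theta - [eps, 0.0])[0]) / (2 * eps)
    assert abs(grad[0] - fd_x) < 1e-4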
|
def MultiIndicator(pos, size, dtype):
x = numpy.zeros(size, dtype=dtype)
if hasattr(pos, '__iter__'):
for i in pos: x[i] = 1
else: x[pos] = 1
return x
|
Returns an array of length size and type dtype that is everywhere 0,
except in the indices listed in sequence pos.
:param pos: A single integer or sequence of integers that specify
the position of ones to be set.
:param size: The total size of the array to be returned.
:param dtype: The element type (compatible with NumPy array())
of the array to be returned.
:returns: An array of length size and element type dtype.
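Illustrative sketch of both call styles (hypothetical values; assumes numpy is importable):

    import numpy
    # single position
    assert list(MultiIndicator(2, 5, int)) == [0, 0, 1, 0, 0]
    # sequence of positions
    assert list(MultiIndicator([0, 3], 5, float)) == [1.0, 0.0, 0.0, 1.0, 0.0]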
|
def get_course_track_selection_url(course_run, query_parameters):
try:
course_root = reverse('course_modes_choose', kwargs={'course_id': course_run['key']})
except KeyError:
LOGGER.exception(
"KeyError while parsing course run data.\nCourse Run: \n[%s]", course_run,
)
raise
url = '{}{}'.format(
settings.LMS_ROOT_URL,
course_root
)
course_run_url = update_query_parameters(url, query_parameters)
return course_run_url
|
Return track selection url for the given course.
Arguments:
course_run (dict): A dictionary containing course run metadata.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Raises:
(KeyError): Raised when course run dict does not have 'key' key.
Returns:
(str): Course track selection url.
|
def update(check, enter_parameters, version):
if check:
if temple.update.up_to_date(version=version):
print('Temple package is up to date')
else:
msg = (
'This temple package is out of date with the latest template.'
' Update your package by running "temple update" and committing changes.'
)
raise temple.exceptions.NotUpToDateWithTemplateError(msg)
else:
temple.update.update(new_version=version, enter_parameters=enter_parameters)
|
Update package with latest template. Must be inside of the project
folder to run.
Using "-e" will prompt for re-entering the template parameters again
even if the project is up to date.
Use "-v" to update to a particular version of a template.
Using "-c" will perform a check that the project is up to date
with the latest version of the template (or the version specified by "-v").
No updating will happen when using this option.
|
def set_user_jobs(session, job_ids):
jobs_data = {
'jobs[]': job_ids
}
response = make_put_request(session, 'self/jobs', json_data=jobs_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status']
else:
raise UserJobsNotSetException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id'])
|
Replace the currently authenticated user's list of jobs with a new list of
jobs
|
def usage_function(parser):
parser.print_usage()
print('')
print('available functions:')
for function in sorted(FUNCTION):
doc = FUNCTION[function].__doc__.strip().splitlines()[0]
print(' %-12s %s' % (function + ':', doc))
return 0
|
Show usage and available curve functions.
|
def generate_gamete(self, egg_or_sperm_word):
p_rate_of_mutation = [0.9, 0.1]
should_use_mutant_pool = (npchoice([0,1], 1, p=p_rate_of_mutation)[0] == 1)
if should_use_mutant_pool:
pool = tokens.secondary_tokens
else:
pool = tokens.primary_tokens
return get_matches(egg_or_sperm_word, pool, 23)
|
Extract 23 'chromosomes' aka words from 'gene pool' aka list of tokens
by searching the list of tokens for words that are related to the given
egg_or_sperm_word.
|
def get_evidences_by_pmid(graph: BELGraph, pmids: Union[str, Iterable[str]]):
result = defaultdict(set)
for _, _, _, data in filter_edges(graph, build_pmid_inclusion_filter(pmids)):
result[data[CITATION][CITATION_REFERENCE]].add(data[EVIDENCE])
return dict(result)
|
Get a dictionary from the given PubMed identifiers to the sets of all evidence strings associated with each
in the graph.
:param graph: A BEL graph
:param pmids: An iterable of PubMed identifiers, as strings. Is consumed and converted to a set.
:return: A dictionary of {pmid: set of all evidence strings}
:rtype: dict
|
def are_domains_equal(domain1, domain2):
domain1 = domain1.encode("idna")
domain2 = domain2.encode("idna")
return domain1.lower() == domain2.lower()
|
Compare two International Domain Names.
:Parameters:
- `domain1`: domains name to compare
- `domain2`: domains name to compare
:Types:
- `domain1`: `unicode`
- `domain2`: `unicode`
:return: `True` if `domain1` and `domain2` are equal as domain names.
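Illustrative sketch (hypothetical domains; the comparison is case-insensitive after IDNA encoding):

    assert are_domains_equal(u"Example.COM", u"example.com")
    assert not are_domains_equal(u"jabber.org", u"xmpp.org")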
|
def get_element_masses(self):
result = [0] * len(self.material.elements)
for compound in self.material.compounds:
c = self.get_compound_mass(compound)
f = [c * x for x in emf(compound, self.material.elements)]
result = [v+f[ix] for ix, v in enumerate(result)]
return result
|
Get the masses of elements in the package.
:returns: [kg] An array of element masses. The sequence of the elements
in the result corresponds with the sequence of elements in the
element list of the material.
|
def remove_group_user(self, group_id, user_id):
response = self._perform_request(
url='/um/groups/%s/users/%s' % (group_id, user_id),
method='DELETE')
return response
|
Removes a user from a group.
:param group_id: The unique ID of the group.
:type group_id: ``str``
:param user_id: The unique ID of the user.
:type user_id: ``str``
|
def mod2md(module, title, title_api_section, toc=True, maxdepth=0):
docstr = module.__doc__
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
api_md = []
api_sec = []
if title_api_section and module.__all__:
sections.append((level+1, title_api_section))
for name in module.__all__:
api_sec.append((level+2, "`" + name + "`"))
api_md += ['', '']
entry = module.__dict__[name]
if entry.__doc__:
md, sec = doc2md(entry.__doc__, "`" + name + "`",
min_level=level+2, more_info=True, toc=False)
api_sec += sec
api_md += md
sections += api_sec
head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
md = [
make_heading(level, title),
"",
] + lines[:head]
if toc:
md += make_toc(sections, maxdepth)
md += ['']
md += _doc2md(lines[head:])
md += [
'',
'',
make_heading(level+1, title_api_section),
]
if toc:
md += ['']
md += make_toc(api_sec, 1)
md += api_md
return "\n".join(md)
|
Generate markdown document from module, including API section.
|
def storage(self, provider='osfstorage'):
stores = self._json(self._get(self._storages_url), 200)
stores = stores['data']
for store in stores:
provides = self._get_attribute(store, 'attributes', 'provider')
if provides == provider:
return Storage(store, self.session)
raise RuntimeError("Project has no storage "
"provider '{}'".format(provider))
|
Return storage `provider`.
|
def num_instances(self):
num = 0
components = self.spouts() + self.bolts()
for component in components:
config = component.comp.config
for kvs in config.kvs:
if kvs.key == api_constants.TOPOLOGY_COMPONENT_PARALLELISM:
num += int(kvs.value)
break
return num
|
Number of spouts + bolts
|
def _manage_args(parser, args):
for item in data.CONFIGURABLE_OPTIONS:
action = parser._option_string_actions[item]
choices = default = ''
input_value = getattr(args, action.dest)
new_val = None
if not args.noinput:
if action.choices:
choices = ' (choices: {0})'.format(', '.join(action.choices))
if input_value:
if type(input_value) == list:
default = ' [default {0}]'.format(', '.join(input_value))
else:
default = ' [default {0}]'.format(input_value)
while not new_val:
prompt = '{0}{1}{2}: '.format(action.help, choices, default)
if action.choices in ('yes', 'no'):
new_val = utils.query_yes_no(prompt)
else:
new_val = compat.input(prompt)
new_val = compat.clean(new_val)
if not new_val and input_value:
new_val = input_value
if new_val and action.dest == 'templates':
if new_val != 'no' and not os.path.isdir(new_val):
sys.stdout.write('Given directory does not exist, retry\n')
new_val = False
if new_val and action.dest == 'db':
action(parser, args, new_val, action.option_strings)
new_val = getattr(args, action.dest)
else:
if not input_value and action.required:
raise ValueError(
'Option {0} is required when in no-input mode'.format(action.dest)
)
new_val = input_value
if action.dest == 'db':
action(parser, args, new_val, action.option_strings)
new_val = getattr(args, action.dest)
if action.dest == 'templates' and (new_val == 'no' or not os.path.isdir(new_val)):
new_val = False
if action.dest in ('bootstrap', 'starting_page'):
new_val = (new_val == 'yes')
setattr(args, action.dest, new_val)
return args
|
Checks and validates the provided input.
|
def validate_account_names(self, names):
for name in names:
if self.get_account(name) is None:
raise ValueError("The account '{}' does not exist in the"
" general ledger structure.".format(name))
|
Validates whether each account in a list of account names exists.
:param names: The names of the accounts.
:raises ValueError: If an account does not exist in the general ledger structure.
|
def search_pattern(regex):
prog = re.compile(regex)
def checker(v):
result = prog.search(v)
if result is None:
raise ValueError(v)
return checker
|
Return a value check function which raises a ValueError if the supplied
regular expression does not match anywhere in the value, see also
`re.search`.
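Illustrative usage sketch (hypothetical pattern and values; assumes the re module is imported):

    check_year = search_pattern(r'\d{4}')
    check_year('released in 1999')      # matching value passes silently
    try:
        check_year('no year here')      # non-matching value raises
    except ValueError:
        pass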
|
def setup_saver(self):
if self.execution_type == "single":
global_variables = self.get_variables(include_submodules=True, include_nontrainable=True)
else:
global_variables = self.global_model.get_variables(include_submodules=True, include_nontrainable=True)
for c in self.get_savable_components():
c.register_saver_ops()
self.saver = tf.train.Saver(
var_list=global_variables,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
saver_def=None,
builder=None,
defer_build=False,
allow_empty=True,
write_version=tf.train.SaverDef.V2,
pad_step_number=False,
save_relative_paths=True
)
|
Creates the tf.train.Saver object and stores it in self.saver.
|
def getDescription(self):
description = {'name':self.name, 'fields':[f.name for f in self.fields], \
'numRecords by field':[f.numRecords for f in self.fields]}
return description
|
Returns a description of the dataset
|
def authenticate(username, password, service='login', encoding='utf-8',
resetcred=True):
if sys.version_info >= (3,):
if isinstance(username, str):
username = username.encode(encoding)
if isinstance(password, str):
password = password.encode(encoding)
if isinstance(service, str):
service = service.encode(encoding)
@conv_func
def my_conv(n_messages, messages, p_response, app_data):
addr = calloc(n_messages, sizeof(PamResponse))
p_response[0] = cast(addr, POINTER(PamResponse))
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
pw_copy = strdup(password)
p_response.contents[i].resp = cast(pw_copy, c_char_p)
p_response.contents[i].resp_retcode = 0
return 0
handle = PamHandle()
conv = PamConv(my_conv, 0)
retval = pam_start(service, username, byref(conv), byref(handle))
if retval != 0:
return False
retval = pam_authenticate(handle, 0)
auth_success = (retval == 0)
if auth_success and resetcred:
retval = pam_setcred(handle, PAM_REINITIALIZE_CRED)
pam_end(handle, retval)
return auth_success
|
Returns True if the given username and password authenticate for the
given service. Returns False otherwise.
``username``: the username to authenticate
``password``: the password in plain text
``service``: the PAM service to authenticate against.
Defaults to 'login'
The above parameters can be strings or bytes. If they are strings,
they will be encoded using the encoding given by:
``encoding``: the encoding to use for the above parameters if they
are given as strings. Defaults to 'utf-8'
``resetcred``: Use the pam_setcred() function to
reinitialize the credentials.
Defaults to 'True'.
|
def status_printer():
last_len = [0]
def p(s):
s = next(spinner) + ' ' + s
len_s = len(s)
output = '\r' + s + (' ' * max(last_len[0] - len_s, 0))
sys.stdout.write(output)
sys.stdout.flush()
last_len[0] = len_s
return p
|
Manage the printing and in-place updating of a line of characters
.. note::
If the string is longer than a line, then in-place updating may not
work (it will print a new line at each refresh).
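A minimal usage sketch (illustrative; assumes the module defines spinner as an iterator, e.g. itertools.cycle('|/-\\')):

    import itertools, sys, time
    spinner = itertools.cycle('|/-\\')
    p = status_printer()
    for step in range(3):
        p('step %d of 3' % (step + 1))   # rewrites the same line in place
        time.sleep(0.1)
    sys.stdout.write('\n')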
|
def instance(self, counter=None):
if not counter:
history = self.history()
if not history:
return history
else:
return Response._from_json(history['pipelines'][0])
return self._get('/instance/{counter:d}'.format(counter=counter))
|
Returns all the information regarding a specific pipeline run
See the `Go pipeline instance documentation`__ for examples.
.. __: http://api.go.cd/current/#get-pipeline-instance
Args:
counter (int): The pipeline instance to fetch.
If falsey returns the latest pipeline instance from :meth:`history`.
Returns:
Response: :class:`gocd.api.response.Response` object
|
def _match(self, pred):
if not pred:
return True
pred = pred[1:-1]
if pred.startswith('@'):
pred = pred[1:]
if '=' in pred:
attr, value = pred.split('=', 1)
if value[0] in ('"', "'"):
value = value[1:]
if value[-1] in ('"', "'"):
value = value[:-1]
return self.attrs.get(attr) == value
else:
return pred in self.attrs
elif num_re.match(pred):
index = int(pred)
if index < 0:
if self.parent:
return self.index == (len(self.parent._children) + index)
else:
return index == 0
else:
return index == self.index
else:
if '=' in pred:
tag, value = pred.split('=', 1)
if value[0] in ('"', "'"):
value = value[1:]
if value[-1] in ('"', "'"):
value = value[:-1]
for c in self._children:
if c.tagname == tag and c.data == value:
return True
else:
for c in self._children:
if c.tagname == pred:
return True
return False
|
Helper function to determine if this node matches the given predicate.
|
def combine(self, members, output_file, dimension=None, start_index=None, stop_index=None, stride=None):
nco = None
try:
nco = Nco()
except BaseException:
raise ImportError("NCO not found. The NCO python bindings are required to use 'Collection.combine'.")
if len(members) > 0 and hasattr(members[0], 'path'):
members = [ m.path for m in members ]
options = ['-4']
options += ['-L', '3']
options += ['-h']
if dimension is not None:
if start_index is None:
start_index = 0
if stop_index is None:
stop_index = ''
if stride is None:
stride = 1
options += ['-d', '{0},{1},{2},{3}'.format(dimension, start_index, stop_index, stride)]
nco.ncrcat(input=members, output=output_file, options=options)
|
Combine many files into a single file on disk. Defaults to using the 'time' dimension.
|
def get_store(self, name, workspace=None):
stores = self.get_stores(workspaces=workspace, names=name)
return self._return_first_item(stores)
|
Returns a single store object.
Will return None if no store is found.
Will raise an error if more than one store with the same name is found.
|
def JNP(cpu, target):
cpu.PC = Operators.ITEBV(cpu.address_bit_size, False == cpu.PF, target.read(), cpu.PC)
|
Jumps short if not parity.
:param cpu: current CPU.
:param target: destination operand.
|
def when_called_with(self, *some_args, **some_kwargs):
if not self.expected:
raise TypeError('expected exception not set, raises() must be called first')
try:
self.val(*some_args, **some_kwargs)
except BaseException as e:
if issubclass(type(e), self.expected):
return AssertionBuilder(str(e), self.description, self.kind)
else:
self._err('Expected <%s> to raise <%s> when called with (%s), but raised <%s>.' % (
self.val.__name__,
self.expected.__name__,
self._fmt_args_kwargs(*some_args, **some_kwargs),
type(e).__name__))
self._err('Expected <%s> to raise <%s> when called with (%s).' % (
self.val.__name__,
self.expected.__name__,
self._fmt_args_kwargs(*some_args, **some_kwargs)))
|
Asserts the val callable when invoked with the given args and kwargs raises the expected exception.
|
def get_field_settings(self):
field_settings = None
if self.field_settings:
if isinstance(self.field_settings, six.string_types):
profiles = settings.CONFIG.get(self.PROFILE_KEY, {})
field_settings = profiles.get(self.field_settings)
else:
field_settings = self.field_settings
return field_settings
|
Get the field settings, if the configured setting is a string try
to get a 'profile' from the global config.
|
def _rectify_hasher(hasher):
if xxhash is not None:
if hasher in {'xxh32', 'xx32', 'xxhash'}:
return xxhash.xxh32
if hasher in {'xxh64', 'xx64'}:
return xxhash.xxh64
if hasher is NoParam or hasher == 'default':
hasher = DEFAULT_HASHER
elif isinstance(hasher, six.string_types):
if hasher not in hashlib.algorithms_available:
raise KeyError('unknown hasher: {}'.format(hasher))
else:
hasher = getattr(hashlib, hasher)
elif isinstance(hasher, HASH):
return lambda: hasher
return hasher
|
Convert a string-based key into a hasher class
Notes:
In terms of speed on 64bit systems, sha1 is the fastest followed by md5
and sha512. The slowest algorithm is sha256. If xxhash is installed
the fastest algorithm is xxh64.
Example:
>>> assert _rectify_hasher(NoParam) is DEFAULT_HASHER
>>> assert _rectify_hasher('sha1') is hashlib.sha1
>>> assert _rectify_hasher('sha256') is hashlib.sha256
>>> assert _rectify_hasher('sha512') is hashlib.sha512
>>> assert _rectify_hasher('md5') is hashlib.md5
>>> assert _rectify_hasher(hashlib.sha1) is hashlib.sha1
>>> assert _rectify_hasher(hashlib.sha1())().name == 'sha1'
>>> import pytest
>>> assert pytest.raises(KeyError, _rectify_hasher, '42')
>>> #assert pytest.raises(TypeError, _rectify_hasher, object)
>>> if xxhash:
>>> assert _rectify_hasher('xxh64') is xxhash.xxh64
>>> assert _rectify_hasher('xxh32') is xxhash.xxh32
|
def prerender(graph: BELGraph) -> Mapping[str, Mapping[str, Any]]:
import bio2bel_hgnc
from bio2bel_hgnc.models import HumanGene
graph: BELGraph = graph.copy()
enrich_protein_and_rna_origins(graph)
collapse_all_variants(graph)
genes: Set[Gene] = get_nodes_by_function(graph, GENE)
hgnc_symbols = {
gene.name
for gene in genes
if gene.namespace.lower() == 'hgnc'
}
result = {}
hgnc_manager = bio2bel_hgnc.Manager()
human_genes = (
hgnc_manager.session
.query(HumanGene.symbol, HumanGene.location)
.filter(HumanGene.symbol.in_(hgnc_symbols))
.all()
)
for human_gene in human_genes:
result[human_gene.symbol] = {
'name': human_gene.symbol,
'chr': (
human_gene.location.split('q')[0]
if 'q' in human_gene.location else
human_gene.location.split('p')[0]
),
}
df = get_df()
for _, (gene_id, symbol, start, stop) in df[df['Symbol'].isin(hgnc_symbols)].iterrows():
result[symbol]['start'] = start
result[symbol]['stop'] = stop
return result
|
Generate the annotations JSON for Ideogram.
|
def walk_files_relative_path(self, relativePath=""):
def walk_files(directory, relativePath):
directories = dict.__getitem__(directory, 'directories')
files = dict.__getitem__(directory, 'files')
for f in sorted(files):
yield os.path.join(relativePath, f)
for k in sorted(dict.keys(directories)):
path = os.path.join(relativePath, k)
dir = directories.__getitem__(k)
for e in walk_files(dir, path):
yield e
dir, errorMessage = self.get_directory_info(relativePath)
assert dir is not None, errorMessage
return walk_files(dir, relativePath='')
|
Walk the repository and yield the relative path (joined with the file name) of every file found.
:parameters:
#. relativePath (str): The relative path from which to start the walk.
|
def diffusion(diffusion_constant=0.2, exposure_time=0.05, samples=200):
radius = 5
psfsize = np.array([2.0, 1.0, 3.0])
s0 = init.create_single_particle_state(imsize=4*radius,
radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
finalimage = 0*s0.get_model_image()[s0.inner]
position = 0*s0.obj.pos[0]
for i in xrange(samples):
offset = np.sqrt(6*diffusion_constant*exposure_time)*np.random.randn(3)
s0.obj.pos[0] = np.array(s0.image.shape)/2 + offset
s0.reset()
finalimage += s0.get_model_image()[s0.inner]
position += s0.obj.pos[0]
finalimage /= float(samples)
position /= float(samples)
s = init.create_single_particle_state(imsize=4*radius, sigma=0.05,
radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
s.reset()
return s, finalimage, position
|
See `diffusion_correlated` for information related to units, etc
|
def print_coords(rows, prefix=''):
lat = [row['lat'] for row in rows]
lon = [row['lon'] for row in rows]
print('COORDS'+'-' * 5)
print("%slat, %slon = %r, %r" % (prefix, prefix, lat, lon))
print('-'*5)
|
Print coordinates within a sequence.
This is only used for debugging. Printed in a form that can be
pasted into Python for visualization.
|
def before_constant(self, constant, key):
newlines_split = split_on_newlines(constant)
for c in newlines_split:
if is_newline(c):
self.current.advance_line()
if self.current.line > self.target.line:
return self.STOP
else:
advance_by = len(c)
if self.is_on_targetted_node(advance_by):
self.found_path = deepcopy(self.current_path)
return self.STOP
self.current.advance_columns(advance_by)
|
Determine if we're on the targetted node.
If the targetted column is reached, `stop` and `path_found` are
set. If the targetted line is passed, only `stop` is set. This
prevents unnecessary tree travelling when the targetted column
is out of bounds.
|
def serpentine_y(x, y, matrix):
if x % 2:
return x, matrix.rows - 1 - y
return x, y
|
Every other column is indexed in reverse.
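Worked sketch (illustrative; the matrix stand-in only needs a rows attribute):

    from collections import namedtuple
    Matrix = namedtuple('Matrix', 'rows')
    m = Matrix(rows=4)
    assert serpentine_y(0, 1, m) == (0, 1)   # even column: y unchanged
    assert serpentine_y(1, 1, m) == (1, 2)   # odd column: y becomes rows - 1 - y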
|
def iso_reference_str2int(n):
n = n.upper()
numbers = []
for c in n:
iso_reference_valid_char(c)
if c in ISO_REFERENCE_VALID_NUMERIC:
numbers.append(c)
else:
numbers.append(str(iso_reference_char2int(c)))
return int(''.join(numbers))
|
Creates the large integer encoded by an alphanumeric ISO reference.
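Illustrative sketch of the digit expansion, assuming the helper iso_reference_char2int maps letters to two-digit values in the usual IBAN/ISO 11649 style (A=10 ... Z=35):

    def _char2int(c):
        # assumed mapping: 'A' -> 10, ..., 'Z' -> 35
        return ord(c) - ord('A') + 10
    expanded = ''.join(str(_char2int(c)) if c.isalpha() else c for c in 'RF1A')
    assert expanded == '2715110'   # int('2715110') would be the returned value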
|
def parse(string):
bib = []
if not isinstance(string, six.text_type):
string = string.decode('utf-8')
for key, value in special_chars:
string = string.replace(key, value)
string = re.sub(r'\\[cuHvs]{?([a-zA-Z])}?', r'\1', string)
entries = re.findall(
r'(?u)@(\w+)[ \t]?{[ \t]*([^,\s]*)[ \t]*,?\s*((?:[^=,\s]+\s*\=\s*(?:"[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,}]*),?\s*?)+)\s*}',
string)
for entry in entries:
pairs = re.findall(r'(?u)([^=,\s]+)\s*\=\s*("[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,]*)', entry[2])
bib.append({'type': entry[0].lower(), 'key': entry[1]})
for key, value in pairs:
key = key.lower()
if value and value[0] == '"' and value[-1] == '"':
value = value[1:-1]
if value and value[0] == '{' and value[-1] == '}':
value = value[1:-1]
if key not in ['booktitle', 'title']:
value = value.replace('}', '').replace('{', '')
else:
if value.startswith('{') and value.endswith('}'):
value = value[1:]
value = value[:-1]
value = value.strip()
value = re.sub(r'\s+', ' ', value)
bib[-1][key] = value
return bib
|
Takes a string in BibTex format and returns a list of BibTex entries, where
each entry is a dictionary containing the entries' key-value pairs.
@type string: string
@param string: bibliography in BibTex format
@rtype: list
@return: a list of dictionaries representing a bibliography
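Illustrative sketch of the expected output shape (hypothetical entry; assumes the module-level special_chars table and the six and re imports):

    entries = parse(u'@article{smith2000, title={A Title}, year={2000}}')
    # roughly: [{'type': 'article', 'key': 'smith2000', 'title': 'A Title', 'year': '2000'}]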
|
def _to_json(self, resp):
try:
json = resp.json()
except ValueError as e:
reason = "TMC Server did not send valid JSON: {0}"
raise APIError(reason.format(repr(e)))
return json
|
Extract json from a response.
Assumes response is valid otherwise.
Internal use only.
|
def compute(self, activeColumns, learn=True):
bottomUpInput = numpy.zeros(self.numberOfCols, dtype=dtype)
bottomUpInput[list(activeColumns)] = 1
super(TemporalMemoryShim, self).compute(bottomUpInput,
enableLearn=learn,
enableInference=True)
predictedState = self.getPredictedState()
self.predictiveCells = set(numpy.flatnonzero(predictedState))
|
Feeds input record through TM, performing inference and learning.
Updates member variables with new state.
@param activeColumns (set) Indices of active columns in `t`
|
def gtk_mouse_button_down(self, widget, event):
if self.menu_enabled and event.button == 3:
menu = self.uimanager.get_widget('/Save as')
menu.popup(None, None, None, None, event.button, event.time)
else:
super(ShoebotWindow, self).gtk_mouse_button_down(widget, event)
|
Handle right mouse button clicks
|
def sync_unicorn_to_manticore(self):
self.write_backs_disabled = True
for reg in self.registers:
val = self._emu.reg_read(self._to_unicorn_id(reg))
self._cpu.write_register(reg, val)
if len(self._mem_delta) > 0:
logger.debug(f"Syncing {len(self._mem_delta)} writes back into Manticore")
for location in self._mem_delta:
value, size = self._mem_delta[location]
self._cpu.write_int(location, value, size * 8)
self.write_backs_disabled = False
self._mem_delta = {}
|
Copy registers and written memory back into Manticore
|
def dump_config_file(filename, args, parser=None):
config = ConfigParser()
config.add_section(SECTION)
if parser is None:
for attr in args:
config.set(SECTION, attr, args[attr])
else:
keys_empty_values_not_pass = (
'--extra-settings', '--languages', '--requirements', '--template', '--timezone')
for action in parser._actions:
if action.dest in ('help', 'config_file', 'config_dump', 'project_name'):
continue
keyp = action.option_strings[0]
option_name = keyp.lstrip('-')
option_value = getattr(args, action.dest)
if any([i for i in keys_empty_values_not_pass if i in action.option_strings]):
if action.dest == 'languages':
if len(option_value) == 1 and option_value[0] == 'en':
config.set(SECTION, option_name, '')
else:
config.set(SECTION, option_name, ','.join(option_value))
else:
config.set(SECTION, option_name, option_value if option_value else '')
elif action.choices == ('yes', 'no'):
config.set(SECTION, option_name, 'yes' if option_value else 'no')
elif action.dest == 'templates':
config.set(SECTION, option_name, option_value if option_value else 'no')
elif action.dest == 'cms_version':
version = ('stable' if option_value == CMS_VERSION_MATRIX['stable']
else option_value)
config.set(SECTION, option_name, version)
elif action.dest == 'django_version':
version = ('stable' if option_value == DJANGO_VERSION_MATRIX['stable']
else option_value)
config.set(SECTION, option_name, version)
elif action.const:
config.set(SECTION, option_name, 'true' if option_value else 'false')
else:
config.set(SECTION, option_name, str(option_value))
with open(filename, 'w') as fp:
config.write(fp)
|
Dump args to config file.
|
async def play(self, track_index: int = 0, ignore_shuffle: bool = False):
if self.repeat and self.current:
self.queue.append(self.current)
self.previous = self.current
self.current = None
self.position = 0
self.paused = False
if not self.queue:
await self.stop()
await self._lavalink.dispatch_event(QueueEndEvent(self))
else:
if self.shuffle and not ignore_shuffle:
track = self.queue.pop(randrange(len(self.queue)))
else:
track = self.queue.pop(min(track_index, len(self.queue) - 1))
self.current = track
await self._lavalink.ws.send(op='play', guildId=self.guild_id, track=track.track)
await self._lavalink.dispatch_event(TrackStartEvent(self, track))
|
Plays the first track in the queue, if any, or the track at the specified index in the queue.
|
def flake8_color(score):
score_cutoffs = (0, 20, 50, 100, 200)
for i in range(len(score_cutoffs)):
if score <= score_cutoffs[i]:
return BADGE_COLORS[i]
return BADGE_COLORS[-1]
|
Return flake8 badge color.
Parameters
----------
score : float
A flake8 score
Returns
-------
str
Badge color
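Worked sketch (illustrative; BADGE_COLORS is assumed to be an ordered sequence of six colors, best to worst):

    # with BADGE_COLORS = ('brightgreen', 'green', 'yellowgreen', 'yellow', 'orange', 'red')
    # flake8_color(0)   -> 'brightgreen'   (score <= 0)
    # flake8_color(35)  -> 'yellowgreen'   (20 < score <= 50)
    # flake8_color(500) -> 'red'           (above every cutoff)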
|
def App(apptype, data_flow_kernel=None, walltime=60, cache=False, executors='all'):
from parsl.app.python import PythonApp
from parsl.app.bash import BashApp
logger.warning("The 'App' decorator will be deprecated in Parsl 0.8. Please use 'python_app' or 'bash_app' instead.")
if apptype == 'python':
app_class = PythonApp
elif apptype == 'bash':
app_class = BashApp
else:
raise InvalidAppTypeError("Invalid apptype requested {}; must be 'python' or 'bash'".format(apptype))
def wrapper(f):
return app_class(f,
data_flow_kernel=data_flow_kernel,
walltime=walltime,
cache=cache,
executors=executors)
return wrapper
|
The App decorator function.
Args:
- apptype (string) : Apptype can be bash|python
Kwargs:
- data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for
managing this app. This can be omitted only
after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.
- walltime (int) : Walltime for app in seconds,
default=60
- executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.
- cache (Bool) : Enable caching of the app call
default=False
Returns:
A PythonApp or BashApp object, which when called runs the apps through the executor.
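Illustrative usage sketch (hypothetical function; assumes a DataFlowKernel has already been loaded, e.g. via parsl.load()):

    @App('python')
    def double(x):
        return x * 2

    future = double(21)          # executes through the loaded DataFlowKernel
    assert future.result() == 42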
|
def deploy(target):
if not os.getenv(CIRCLECI_ENV_VAR):
raise EnvironmentError('Must be on CircleCI to run this script')
current_branch = os.getenv('CIRCLE_BRANCH')
if (target == 'PROD') and (current_branch != 'master'):
raise EnvironmentError((
'Refusing to deploy to production from branch {current_branch!r}. '
'Production deploys can only be made from master.'
).format(current_branch=current_branch))
if target in ('PROD', 'TEST'):
pypi_username = os.getenv('{target}_PYPI_USERNAME'.format(target=target))
pypi_password = os.getenv('{target}_PYPI_PASSWORD'.format(target=target))
else:
raise ValueError(
"Deploy target must be 'PROD' or 'TEST', got {target!r}.".format(target=target))
if not (pypi_username and pypi_password):
raise EnvironmentError((
"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' "
"environment variables. These are required to push to PyPI."
).format(target=target))
os.environ['TWINE_USERNAME'] = pypi_username
os.environ['TWINE_PASSWORD'] = pypi_password
_shell('git config --global user.email "oss@cloverhealth.com"')
_shell('git config --global user.name "Circle CI"')
_shell('git config push.default current')
ret = _shell('make version', stdout=subprocess.PIPE)
version = ret.stdout.decode('utf-8').strip()
print('Deploying version {version!r}...'.format(version=version))
_shell('git tag -f -a {version} -m "Version {version}"'.format(version=version))
_shell(
'sed -i.bak "s/^__version__ = .*/__version__ = {version!r}/" */version.py'.format(
version=version))
_shell('python setup.py sdist bdist_wheel')
_shell('git add ChangeLog AUTHORS */version.py')
_shell('git commit --no-verify -m "Merge autogenerated files [skip ci]"')
_pypi_push('dist')
_shell('git push --follow-tags')
print('Deployment complete. Latest version is {version}.'.format(version=version))
|
Deploys the package and documentation.
Proceeds in the following steps:
1. Ensures proper environment variables are set and checks that we are on Circle CI
2. Tags the repository with the new version
3. Creates a standard distribution and a wheel
4. Updates version.py to have the proper version
5. Commits the ChangeLog, AUTHORS, and version.py file
6. Pushes to PyPI
7. Pushes the tags and newly committed files
Raises:
`EnvironmentError`:
- Not running on CircleCI
- `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables
are missing
- Attempting to deploy to production from a branch that isn't master
|
def map(cls, x, palette, limits, na_value=None):
n = len(limits)
pal = palette(n)[match(x, limits)]
try:
pal[pd.isnull(x)] = na_value
except TypeError:
pal = [v if not pd.isnull(v) else na_value for v in pal]
return pal
|
Map values to a discrete palette
Parameters
----------
palette : callable ``f(x)``
palette to use
x : array_like
Continuous values to scale
na_value : object
Value to use for missing values.
Returns
-------
out : array_like
Values mapped onto a palette
|
async def read(response, loads=loads, encoding=None):
ctype = response.headers.get('Content-Type', "").lower()
try:
if "application/json" in ctype:
logger.info("decoding data as json")
return await response.json(encoding=encoding, loads=loads)
if "text" in ctype:
logger.info("decoding data as text")
return await response.text(encoding=encoding)
except (UnicodeDecodeError, json.JSONDecodeError) as exc:
data = await response.read()
raise exceptions.PeonyDecodeError(response=response,
data=data,
exception=exc)
return await response.read()
|
read the data of the response
Parameters
----------
response : aiohttp.ClientResponse
response
loads : callable
json loads function
encoding : :obj:`str`, optional
character encoding of the response, if set to None
aiohttp should guess the right encoding
Returns
-------
:obj:`bytes`, :obj:`str`, :obj:`dict` or :obj:`list`
the data returned depends on the response
|
def post_cleanup(self):
parse_tags = ['p']
if self.config.parse_lists:
parse_tags.extend(['ul', 'ol'])
if self.config.parse_headers:
parse_tags.extend(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
target_node = self.article.top_node
node = self.add_siblings(target_node)
for elm in self.parser.getChildren(node):
e_tag = self.parser.getTag(elm)
if e_tag not in parse_tags:
if (self.is_highlink_density(elm) or self.is_table_and_no_para_exist(elm) or
not self.is_nodescore_threshold_met(node, elm)):
self.parser.remove(elm)
return node
|
Remove any divs that look like non-content: clusters of links, or paragraphs with no gusto.
|
def set_user_methods_P(self, user_methods_P, forced_P=False):
if isinstance(user_methods_P, str):
user_methods_P = [user_methods_P]
self.user_methods_P = user_methods_P
self.forced_P = forced_P
if set(self.user_methods_P).difference(self.all_methods_P):
raise Exception("One of the given methods is not available for this chemical")
if not self.user_methods_P and self.forced:
raise Exception('Only user specified methods are considered when forced is True, but no methods were provided')
self.method_P = None
self.sorted_valid_methods_P = []
self.TP_cached = None
|
Method to set the pressure-dependent property methods desired for
consideration by the user. Can be used to exclude certain methods which
might have unacceptable accuracy.
As a side effect, the previously selected method is removed when
this method is called to ensure user methods are tried in the desired
order.
Parameters
----------
user_methods_P : str or list
Methods by name to be considered or preferred for pressure effect.
forced : bool, optional
If True, only the user specified methods will ever be considered;
if False other methods will be considered if no user methods
succeed.
|
def deploy(
src, requirements=None, local_package=None,
config_file='config.yaml', profile_name=None,
preserve_vpc=False
):
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
path_to_zip_file = build(
src, config_file=config_file,
requirements=requirements,
local_package=local_package,
)
existing_config = get_function_config(cfg)
if existing_config:
update_function(cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc)
else:
create_function(cfg, path_to_zip_file)
|
Deploys a new function to AWS Lambda.
:param str src:
The path to your Lambda-ready project (the folder must contain a valid
config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package that should be included in the deploy as
well (and/or is not available on PyPI).
|
def get_node(self, string_key):
pos = self.get_node_pos(string_key)
if pos is None:
return None
return self.ring[self._sorted_keys[pos]]
|
Given a string key a corresponding node in the hash ring is returned.
If the hash ring is empty, `None` is returned.
|
def check_docstring(cls):
docstring = inspect.getdoc(cls)
if not docstring:
breadcrumbs = " -> ".join(t.__name__ for t in inspect.getmro(cls)[:-1][::-1])
msg = "docstring required for plugin '%s' (%s, defined in %s)"
args = (cls.__name__, breadcrumbs, cls.__module__)
raise InternalCashewException(msg % args)
max_line_length = cls._class_settings.get('max-docstring-length')
if max_line_length:
for i, line in enumerate(docstring.splitlines()):
if len(line) > max_line_length:
msg = "docstring line %s of %s is %s chars too long"
args = (i, cls.__name__, len(line) - max_line_length)
raise Exception(msg % args)
return docstring
|
Asserts that the class has a docstring, returning it if successful.
|
def sf01(arr):
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
|
swap and then flatten axes 0 and 1
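Worked sketch of the shape change (illustrative; assumes numpy):

    import numpy as np
    arr = np.arange(24).reshape(2, 3, 4)     # e.g. (timesteps, envs, features)
    out = sf01(arr)
    assert out.shape == (6, 4)               # axes 0 and 1 swapped, then merged
    assert (out[0] == arr[0, 0]).all() and (out[1] == arr[1, 0]).all()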
|
def get(self, key_name, decrypt=True):
self._assert_valid_stash()
key = self._storage.get(key_name).copy()
if not key.get('value'):
return None
if decrypt:
key['value'] = self._decrypt(key['value'])
audit(
storage=self._storage.db_path,
action='GET',
message=json.dumps(dict(key_name=key_name)))
return key
|
Return a key with its parameters if it was found.
|
def write_bel_namespace(self, file: TextIO, use_names: bool = False) -> None:
if not self.is_populated():
self.populate()
if use_names and not self.has_names:
raise ValueError
values = (
self._get_namespace_name_to_encoding(desc='writing names')
if use_names else
self._get_namespace_identifier_to_encoding(desc='writing identifiers')
)
write_namespace(
namespace_name=self._get_namespace_name(),
namespace_keyword=self._get_namespace_keyword(),
namespace_query_url=self.identifiers_url,
values=values,
file=file,
)
|
Write as a BEL namespace file.
|
def draw_mask(self, image_shape, size_lines=1, size_points=0,
raise_if_out_of_image=False):
heatmap = self.draw_heatmap_array(
image_shape,
alpha_lines=1.0, alpha_points=1.0,
size_lines=size_lines, size_points=size_points,
antialiased=False,
raise_if_out_of_image=raise_if_out_of_image)
return heatmap > 0.5
|
Draw this line segment as a binary image mask.
Parameters
----------
image_shape : tuple of int
The shape of the image onto which to draw the line mask.
size_lines : int, optional
Thickness of the line segments.
size_points : int, optional
Size of the points in pixels.
raise_if_out_of_image : bool, optional
Whether to raise an error if the line string is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
ndarray
Boolean line mask of shape `image_shape` (no channel axis).
|
def fill_rect(setter, x, y, w, h, color=None, aa=False):
for i in range(x, x + w):
_draw_fast_vline(setter, i, y, h, color, aa)
|
Draw solid rectangle with top-left corner at x,y, width w and height h
|
def create_file(fname=None, fname_tmp=None, tmpdir=None,
save_tmpfile=False, keepext=False):
if fname == ':memory:':
yield fname
return
if fname_tmp is None:
basename = os.path.basename(fname)
root, ext = os.path.splitext(basename)
dir_ = this_dir = os.path.dirname(fname)
if not keepext:
root = root + ext
ext = ''
if tmpdir:
if tmpdir is True:
for dir__ in possible_tmpdirs:
if os.access(dir__, os.F_OK):
dir_ = dir__
break
tmpfile = tempfile.NamedTemporaryFile(
prefix='tmp-' + root + '-', suffix=ext, dir=dir_, delete=False)
fname_tmp = tmpfile.name
try:
yield fname_tmp
except Exception as e:
if save_tmpfile:
print("Temporary file is '%s'" % fname_tmp)
else:
os.unlink(fname_tmp)
raise
try:
os.rename(fname_tmp, fname)
os.chmod(fname, 0o777 & ~current_umask)
except OSError as e:
tmpfile2 = tempfile.NamedTemporaryFile(
prefix='tmp-' + root + '-', suffix=ext, dir=this_dir, delete=False)
shutil.copy(fname_tmp, tmpfile2.name)
os.rename(tmpfile2.name, fname)
os.chmod(fname, 0o666 & ~current_umask)
os.unlink(fname_tmp)
|
Context manager for making files with possibility of failure.
If you are creating a file, it is possible that the code will fail
and leave a corrupt intermediate file. This is especially damaging
if this is used as automatic input to another process. This context
manager helps by creating a temporary filename, your code runs and
creates that temporary file, and then if no exceptions are raised,
the context manager will move the temporary file to the original
filename you intended to open.
Parameters
----------
fname : str
Target filename, this file will be created if all goes well
fname_tmp : str
If given, this is used as the temporary filename.
tmpdir : str or bool
If given, put temporary files in this directory. If `True`,
then find a good tmpdir that is not on local filesystem.
save_tmpfile : bool
If true, the temporary file is not deleted if an exception
is raised.
keepext : bool, default False
If true, have tmpfile have same extension as final file.
Returns (as context manager value)
----------------------------------
fname_tmp: str
Temporary filename to be used. Same as `fname_tmp`
if given as an argument.
Raises
------
Re-raises any exception occurring during the context block.
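A minimal usage sketch (illustrative; the filename is hypothetical):

    with create_file('results.txt') as tmp_name:
        with open(tmp_name, 'w') as f:
            f.write('all good\n')
    # 'results.txt' only appears once the block exits without an exception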
|
def write_to_fullarr(data, sample, sidx):
LOGGER.info("writing fullarr %s %s", sample.name, sidx)
with h5py.File(data.clust_database, 'r+') as io5:
chunk = io5["catgs"].attrs["chunksize"][0]
catg = io5["catgs"]
nall = io5["nalleles"]
smpio = os.path.join(data.dirs.across, sample.name+'.tmp.h5')
with h5py.File(smpio) as indat:
newcatg = indat["icatg"]
onall = indat["inall"]
for cidx in xrange(0, catg.shape[0], chunk):
end = cidx + chunk
catg[cidx:end, sidx:sidx+1, :] = np.expand_dims(newcatg[cidx:end, :], axis=1)
nall[:, sidx:sidx+1] = np.expand_dims(onall, axis=1)
|
writes arrays to h5 disk
|
def _inactivate_organization(organization):
[_inactivate_organization_course_relationship(record) for record
in internal.OrganizationCourse.objects.filter(organization_id=organization.id, active=True)]
[_inactivate_record(record) for record
in internal.Organization.objects.filter(id=organization.id, active=True)]
|
Inactivates an activated organization as well as any active relationships
|
def __base_state(self, containers):
return dict(blockade_id=self._blockade_id,
containers=containers,
version=self._state_version)
|
Convert blockade ID and container information into
a state dictionary object.
|
def _new_dynspace(
self,
name=None,
bases=None,
formula=None,
refs=None,
arguments=None,
source=None,
):
if name is None:
name = self.spacenamer.get_next(self.namespace)
if name in self.namespace:
raise ValueError("Name '%s' already exists." % name)
if not is_valid_name(name):
raise ValueError("Invalid name '%s'." % name)
space = RootDynamicSpaceImpl(
parent=self,
name=name,
formula=formula,
refs=refs,
source=source,
arguments=arguments,
)
space.is_derived = False
self._set_space(space)
if bases:
dynbase = self._get_dynamic_base(bases)
space._dynbase = dynbase
dynbase._dynamic_subs.append(space)
return space
|
Create a new dynamic root space.
|
def load_command_table(self, args):
with CommandSuperGroup(__name__, self,
'rcctl.custom_cluster') as super_group:
with super_group.group('cluster') as group:
group.command('select', 'select')
with CommandSuperGroup(__name__, self, 'rcctl.custom_reliablecollections',
client_factory=client_create) as super_group:
with super_group.group('dictionary') as group:
group.command('query', 'query_reliabledictionary')
group.command('execute', 'execute_reliabledictionary')
group.command('schema', 'get_reliabledictionary_schema')
group.command('list', 'get_reliabledictionary_list')
group.command('type-schema', 'get_reliabledictionary_type_schema')
with ArgumentsContext(self, 'dictionary') as ac:
ac.argument('application_name', options_list=['--application-name', '-a'])
ac.argument('service_name', options_list=['--service-name', '-s'])
ac.argument('dictionary_name', options_list=['--dictionary-name', '-d'])
ac.argument('output_file', options_list=['--output-file', '-out'])
ac.argument('input_file', options_list=['--input-file', '-in'])
ac.argument('query_string', options_list=['--query-string', '-q'])
ac.argument('type_name', options_list=['--type-name', '-t'])
return OrderedDict(self.command_table)
|
Load all Service Fabric commands
|
def check_pid(pid, debug):
try:
os.kill(pid, 0)
if debug > 1:
print("Script has a PIDFILE where the process is still running")
return True
except OSError:
if debug > 1:
print("Script does not appear to be running")
return False
|
This function will check whether a PID is currently running
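Illustrative sketch (assumes the os module; the current process is used as a PID that is certainly alive):

    import os
    assert check_pid(os.getpid(), debug=0) is True
    # a stale or bogus PID would return False instead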
|
def plot_nodes(self, nodelist, theta, group):
for i, node in enumerate(nodelist):
r = self.internal_radius + i * self.scale
x, y = get_cartesian(r, theta)
circle = plt.Circle(xy=(x, y), radius=self.dot_radius,
color=self.node_colormap[group], linewidth=0)
self.ax.add_patch(circle)
|
Plots nodes to screen.
|
def forward(self, x):
features = self.conv(x).mean(dim=2)
return self.dense(features)
|
Passing data through the network.
:param x: 2d tensor containing both (x,y) Variables
:return: output of the net
|
def create(self, path, lock):
self._lock.acquire_write()
try:
assert lock.get("token") is None
assert lock.get("expire") is None, "Use timeout instead of expire"
assert path and "/" in path
org_path = path
path = normalize_lock_root(path)
lock["root"] = path
timeout = float(lock.get("timeout"))
if timeout is None:
timeout = LockStorageDict.LOCK_TIME_OUT_DEFAULT
elif timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
timeout = LockStorageDict.LOCK_TIME_OUT_MAX
lock["timeout"] = timeout
lock["expire"] = time.time() + timeout
validate_lock(lock)
token = generate_lock_token()
lock["token"] = token
self._dict[token] = lock
key = "URL2TOKEN:{}".format(path)
if key not in self._dict:
self._dict[key] = [token]
else:
tokList = self._dict[key]
tokList.append(token)
self._dict[key] = tokList
self._flush()
_logger.debug(
"LockStorageDict.set({!r}): {}".format(org_path, lock_string(lock))
)
return lock
finally:
self._lock.release()
|
Create a direct lock for a resource path.
path:
Normalized path (utf8 encoded string, no trailing '/')
lock:
lock dictionary, without a token entry
Returns:
New unique lock token.
**Note:** the lock dictionary may be modified on return:
- lock['root'] is ignored and set to the normalized <path>
- lock['timeout'] may be normalized and shorter than requested
- lock['token'] is added
|
def alpha(reliability_data=None, value_counts=None, value_domain=None, level_of_measurement='interval',
dtype=np.float64):
if (reliability_data is None) == (value_counts is None):
raise ValueError("Either reliability_data or value_counts must be provided, but not both.")
if value_counts is None:
if type(reliability_data) is not np.ndarray:
reliability_data = np.array(reliability_data)
value_domain = value_domain or np.unique(reliability_data[~np.isnan(reliability_data)])
value_counts = _reliability_data_to_value_counts(reliability_data, value_domain)
else:
if value_domain:
assert value_counts.shape[1] == len(value_domain), \
"The value domain should be equal to the number of columns of value_counts."
else:
value_domain = tuple(range(value_counts.shape[1]))
distance_metric = _distance_metric(level_of_measurement)
o = _coincidences(value_counts, value_domain, dtype=dtype)
n_v = np.sum(o, axis=0)
n = np.sum(n_v)
e = _random_coincidences(value_domain, n, n_v)
d = _distances(value_domain, distance_metric, n_v)
return 1 - np.sum(o * d) / np.sum(e * d)
|
Compute Krippendorff's alpha.
See https://en.wikipedia.org/wiki/Krippendorff%27s_alpha for more information.
Parameters
----------
reliability_data : array_like, with shape (M, N)
Reliability data matrix which has the rate the i coder gave to the j unit, where M is the number of raters
and N is the unit count.
Missing rates are represented with `np.nan`.
If it's provided then `value_counts` must not be provided.
value_counts : ndarray, with shape (N, V)
Number of coders that assigned a certain value to a determined unit, where N is the number of units
and V is the value count.
If it's provided then `reliability_data` must not be provided.
value_domain : array_like, with shape (V,)
Possible values the units can take.
If the level of measurement is not nominal, it must be ordered.
If `reliability_data` is provided, then the default value is the ordered list of unique rates that appear.
Else, the default value is `list(range(V))`.
level_of_measurement : string or callable
Steven's level of measurement of the variable.
It must be one of 'nominal', 'ordinal', 'interval', 'ratio' or a callable.
dtype : data-type
Result and computation data-type.
Returns
-------
alpha : `dtype`
Scalar value of Krippendorff's alpha of type `dtype`.
Examples
--------
>>> reliability_data = [[np.nan, np.nan, np.nan, np.nan, np.nan, 3, 4, 1, 2, 1, 1, 3, 3, np.nan, 3],
... [1, np.nan, 2, 1, 3, 3, 4, 3, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
... [np.nan, np.nan, 2, 1, 3, 4, 4, np.nan, 2, 1, 1, 3, 3, np.nan, 4]]
>>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='nominal'), 6))
0.691358
>>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='interval'), 6))
0.810845
>>> value_counts = np.array([[1, 0, 0, 0],
... [0, 0, 0, 0],
... [0, 2, 0, 0],
... [2, 0, 0, 0],
... [0, 0, 2, 0],
... [0, 0, 2, 1],
... [0, 0, 0, 3],
... [1, 0, 1, 0],
... [0, 2, 0, 0],
... [2, 0, 0, 0],
... [2, 0, 0, 0],
... [0, 0, 2, 0],
... [0, 0, 2, 0],
... [0, 0, 0, 0],
... [0, 0, 1, 1]])
>>> print(round(alpha(value_counts=value_counts, level_of_measurement='nominal'), 6))
0.691358
>>> # The following examples were extracted from
>>> # https://www.statisticshowto.datasciencecentral.com/wp-content/uploads/2016/07/fulltext.pdf, page 8.
>>> reliability_data = [[1, 2, 3, 3, 2, 1, 4, 1, 2, np.nan, np.nan, np.nan],
... [1, 2, 3, 3, 2, 2, 4, 1, 2, 5, np.nan, 3.],
... [np.nan, 3, 3, 3, 2, 3, 4, 2, 2, 5, 1, np.nan],
... [1, 2, 3, 3, 2, 4, 4, 1, 2, 5, 1, np.nan]]
>>> print(round(alpha(reliability_data, level_of_measurement='ordinal'), 3))
0.815
>>> print(round(alpha(reliability_data, level_of_measurement='ratio'), 3))
0.797
|
def has_protein_modification_increases_activity(graph: BELGraph,
source: BaseEntity,
target: BaseEntity,
key: str,
) -> bool:
edge_data = graph[source][target][key]
return has_protein_modification(graph, source) and part_has_modifier(edge_data, OBJECT, ACTIVITY)
|
Check if pmod of source causes activity of target.
|
def activatePredictedColumn(self, column, columnActiveSegments,
columnMatchingSegments, prevActiveCells,
prevWinnerCells, learn):
return self._activatePredictedColumn(
self.connections, self._random,
columnActiveSegments, prevActiveCells, prevWinnerCells,
self.numActivePotentialSynapsesForSegment,
self.maxNewSynapseCount, self.initialPermanence,
self.permanenceIncrement, self.permanenceDecrement,
self.maxSynapsesPerSegment, learn)
|
Determines which cells in a predicted column should be added to winner cells
list, and learns on the segments that correctly predicted this column.
:param column: (int) Index of bursting column.
:param columnActiveSegments: (iter) Active segments in this column.
:param columnMatchingSegments: (iter) Matching segments in this column.
:param prevActiveCells: (list) Active cells in ``t-1``.
:param prevWinnerCells: (list) Winner cells in ``t-1``.
:param learn: (bool) If true, grow and reinforce synapses.
:returns: (list) A list of predicted cells that will be added to
active cells and winner cells.
|
async def _now(self, ctx):
player = self.bot.lavalink.players.get(ctx.guild.id)
song = 'Nothing'
if player.current:
position = lavalink.Utils.format_time(player.position)
if player.current.stream:
duration = '🔴 LIVE'
else:
duration = lavalink.Utils.format_time(player.current.duration)
song = f'**[{player.current.title}]({player.current.uri})**\n({position}/{duration})'
embed = discord.Embed(color=discord.Color.blurple(), title='Now Playing', description=song)
await ctx.send(embed=embed)
|
Shows some stats about the currently playing song.
|
def em_schedule(**kwargs):
mdrunner = kwargs.pop('mdrunner', None)
integrators = kwargs.pop('integrators', ['l-bfgs', 'steep'])
kwargs.pop('integrator', None)
nsteps = kwargs.pop('nsteps', [100, 1000])
outputs = ['em{0:03d}_{1!s}.pdb'.format(i, integrator) for i,integrator in enumerate(integrators)]
outputs[-1] = kwargs.pop('output', 'em.pdb')
files = {'struct': kwargs.pop('struct', None)}
for i, integrator in enumerate(integrators):
struct = files['struct']
logger.info("[em %d] energy minimize with %s for maximum %d steps", i, integrator, nsteps[i])
kwargs.update({'struct':struct, 'output':outputs[i],
'integrator':integrator, 'nsteps': nsteps[i]})
if not integrator == 'l-bfgs':
kwargs['mdrunner'] = mdrunner
else:
kwargs['mdrunner'] = None
logger.warning("[em %d] Not using mdrunner for L-BFGS because it cannot "
"do parallel runs.", i)
files = energy_minimize(**kwargs)
return files
|
Run multiple energy minimizations one after each other.
:Keywords:
*integrators*
list of integrators (from 'l-bfgs', 'cg', 'steep')
[['bfgs', 'steep']]
*nsteps*
list of maximum number of steps; one for each integrator in
in the *integrators* list [[100,1000]]
*kwargs*
mostly passed to :func:`gromacs.setup.energy_minimize`
:Returns: dictionary with paths to final structure ('struct') and
other files
:Example:
Conduct three minimizations:
1. low-memory Broyden-Fletcher-Goldfarb-Shanno (BFGS) for 30 steps
2. steepest descent for 200 steps
3. finish with BFGS for another 30 steps
We also do a multi-processor minimization when possible (i.e. for steep
(and conjugate gradient) by using a :class:`gromacs.run.MDrunner` class
for a :program:`mdrun` executable compiled for OpenMP in 64 bit (see
:mod:`gromacs.run` for details)::
import gromacs.run
gromacs.setup.em_schedule(struct='solvate/ionized.gro',
mdrunner=gromacs.run.MDrunnerOpenMP64,
integrators=['l-bfgs', 'steep', 'l-bfgs'],
nsteps=[50,200, 50])
.. Note:: You might have to prepare the mdp file carefully because at the
moment one can only modify the *nsteps* parameter on a
per-minimizer basis.
|
def _step6func(self,
samples,
noreverse,
force,
randomseed,
ipyclient,
**kwargs):
samples = _get_samples(self, samples)
csamples = self._samples_precheck(samples, 6, force)
if self._headers:
print("\n Step 6: Clustering at {} similarity across {} samples".\
format(self.paramsdict["clust_threshold"], len(csamples)))
if not csamples:
raise IPyradError(FIRST_RUN_5)
elif not force:
if all([i.stats.state >= 6 for i in csamples]):
print(DATABASE_EXISTS.format(len(samples)))
return
assemble.cluster_across.run(
self,
csamples,
noreverse,
force,
randomseed,
ipyclient,
**kwargs)
|
Hidden function to start Step 6.
|
def verify_jid_against_srv_name(self, jid, srv_type):
srv_prefix = u"_" + srv_type + u"."
srv_prefix_l = len(srv_prefix)
for srv in self.alt_names.get("SRVName", []):
logger.debug("checking {0!r} against {1!r}".format(jid,
srv))
if not srv.startswith(srv_prefix):
logger.debug("{0!r} does not start with {1!r}"
.format(srv, srv_prefix))
continue
try:
srv_jid = JID(srv[srv_prefix_l:])
except ValueError:
continue
if srv_jid == jid:
logger.debug("Match!")
return True
return False
|
Check if the certificate is valid for given domain-only JID
and a service type.
:Parameters:
- `jid`: JID requested (domain part only)
- `srv_type`: service type, e.g. 'xmpp-client'
:Types:
- `jid`: `JID`
- `srv_type`: `unicode`
:Returntype: `bool`
|
def generate(self, title=None, version=None, base_path=None,
info=None, swagger=None, **kwargs):
title = title or self.api_title
version = version or self.api_version
info = info or self.swagger.get('info', {})
swagger = swagger or self.swagger
base_path = base_path or self.base_path
swagger = swagger.copy()
info.update(title=title, version=version)
swagger.update(swagger='2.0', info=info, basePath=base_path)
paths, tags = self._build_paths()
if tags:
swagger.setdefault('tags', [])
tag_names = {t['name'] for t in swagger['tags']}
for tag in tags:
if tag['name'] not in tag_names:
swagger['tags'].append(tag)
if paths:
swagger.setdefault('paths', {})
merge_dicts(swagger['paths'], paths)
definitions = self.definitions.definition_registry
if definitions:
swagger.setdefault('definitions', {})
merge_dicts(swagger['definitions'], definitions)
parameters = self.parameters.parameter_registry
if parameters:
swagger.setdefault('parameters', {})
merge_dicts(swagger['parameters'], parameters)
responses = self.responses.response_registry
if responses:
swagger.setdefault('responses', {})
merge_dicts(swagger['responses'], responses)
return swagger
|
Generate Swagger 2.0 documentation. Keyword arguments may be used
to provide additional information to build methods, such as ignores.
:param title:
The name presented on the swagger document.
:param version:
The version of the API presented on the swagger document.
:param base_path:
The path that all requests to the API must refer to.
:param info:
Swagger info field.
:param swagger:
Extra fields that should be provided on the swagger documentation.
:rtype: dict
:returns: Full OpenAPI/Swagger compliant specification for the application.
|
def economic_status(self):
if self.__economic_status:
return self.__economic_status
else:
self.__economic_status = economic_status(self.CAS, Method='Combined')
return self.__economic_status
|
Dictionary of economic status indicators for the chemical.
Examples
--------
>>> pprint(Chemical('benzene').economic_status)
["US public: {'Manufactured': 6165232.1, 'Imported': 463146.474, 'Exported': 271908.252}",
u'1,000,000 - 10,000,000 tonnes per annum',
u'Intermediate Use Only',
'OECD HPV Chemicals']
|
def mouseMoveEvent(self, e):
super(PyInteractiveConsole, self).mouseMoveEvent(e)
cursor = self.cursorForPosition(e.pos())
assert isinstance(cursor, QtGui.QTextCursor)
p = cursor.positionInBlock()
usd = cursor.block().userData()
if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:
if QtWidgets.QApplication.overrideCursor() is None:
QtWidgets.QApplication.setOverrideCursor(
QtGui.QCursor(QtCore.Qt.PointingHandCursor))
else:
if QtWidgets.QApplication.overrideCursor() is not None:
QtWidgets.QApplication.restoreOverrideCursor()
|
Extends mouseMoveEvent to display a pointing hand cursor when the
mouse cursor is over a file location
|
def getPaths(roots, ignores=None):
paths, count, ignores = [], 0, ignores or []
ignore_re = multiglob_compile(ignores, prefix=False)
for root in roots:
root = os.path.realpath(root)
if os.path.isfile(root):
paths.append(root)
continue
for fldr in os.walk(root):
out.write("Gathering file paths to compare... (%d files examined)"
% count)
            # Iterate over a copy so entries can be pruned from fldr[1]
            # in place without skipping the next sibling directory.
            for subdir in fldr[1][:]:
                dirpath = os.path.join(fldr[0], subdir)
                if ignore_re.match(dirpath):
                    fldr[1].remove(subdir)
for filename in fldr[2]:
filepath = os.path.join(fldr[0], filename)
if ignore_re.match(filepath):
continue
paths.append(filepath)
count += 1
out.write("Found %s files to be compared for duplication." % (len(paths)),
newline=True)
return paths
|
Recursively walk a set of paths and return a listing of contained files.
:param roots: Relative or absolute paths to files or folders.
:type roots: :class:`~__builtins__.list` of :class:`~__builtins__.str`
:param ignores: A list of :py:mod:`fnmatch` globs to avoid walking and
omit from results
:type ignores: :class:`~__builtins__.list` of :class:`~__builtins__.str`
:returns: Absolute paths to only files.
:rtype: :class:`~__builtins__.list` of :class:`~__builtins__.str`
.. todo:: Try to optimize the ignores matching. Running a regex on every
filename is a fairly significant percentage of the time taken according
to the profiler.
|
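A minimal usage sketch, assuming it runs inside the module where the helpers it relies on (multiglob_compile, out) are defined; the paths and glob patterns are assumptions.
# Hypothetical call: gather files from two roots, skipping VCS metadata.
paths = getPaths(
    ['./photos', '/mnt/backup/photos'],
    ignores=['*/.git/*', '*~', '*.tmp'],
)
print("%d files queued for duplicate comparison" % len(paths))
|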
def remove(self, collection, **kwargs):
callback = kwargs.pop('callback')
yield Op(self.db[collection].remove, kwargs)
callback()
|
remove records from collection whose parameters match kwargs
|
def render_ranks (graph, ranks, dot_file="graph.dot"):
if dot_file:
write_dot(graph, ranks, path=dot_file)
|
render the TextRank graph for visual formats
|
def _start_processes(self, commands):
Log.info("Start processes")
processes_to_monitor = {}
for (name, command) in commands.items():
p = self._run_process(name, command)
processes_to_monitor[p.pid] = ProcessInfo(p, name, command)
log_pid_for_process(name, p.pid)
with self.process_lock:
self.processes_to_monitor.update(processes_to_monitor)
|
Start all commands and add them to the dict of processes to be monitored
|
def run(self):
filename = ".DS_Store"
command = "find {path} -type f -name \"{filename}\" ".format(path = self.path, filename = filename)
cmd = CommandHelper(command)
cmd.execute()
files = cmd.output.split("\n")
for f in files:
if not f.endswith(filename):
continue
rel_path = f.replace(self.path, "")
if rel_path.startswith(tuple(self.CONFIG['exclude_paths'])):
continue
issue = Issue()
issue.name = "File .DS_Store detected"
issue.potential = False
issue.severity = Issue.SEVERITY_LOW
issue.file = rel_path
self.saveIssue(issue)
|
Find .DS_Store files under the given path.
|
def importvcf(vcffile, locifile):
    try:
        with open(vcffile, 'r') as invcf:
            for line in invcf:
                # Sample names live on the VCF header line starting with
                # "#CHROM"; everything after the FORMAT column is a sample.
                if line.split()[0] == "#CHROM":
                    names_col = line.split().index("FORMAT") + 1
                    names = line.split()[names_col:]
                    LOGGER.debug("Got names - %s", names)
                    break
        print("wat")
    except Exception:
        print("wat")
|
Function for importing a vcf file into loci format. Arguments
are the input vcffile and the loci file to write out.
|
def TK_ask(title,msg):
root = tkinter.Tk()
root.attributes("-topmost", True)
root.withdraw()
result=tkinter.messagebox.askyesno(title,msg)
root.destroy()
return result
|
use the GUI to ask YES or NO.
|
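A short usage sketch of the dialog helper above; the messages and the action being gated are assumptions.
# Gate a destructive action behind a yes/no dialog.
if TK_ask("Confirm delete", "Remove all cached files?"):
    print("user said yes")   # perform the deletion here
else:
    print("user said no")
|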
def get_shape(img):
if hasattr(img, 'shape'):
shape = img.shape
else:
shape = img.get_data().shape
return shape
|
Return the shape of img.
Parameters
----------
img: array-like or image object exposing either `shape` or `get_data()`
Returns
-------
shape: tuple
|
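A small sketch of the two supported inputs: plain NumPy arrays use the `shape` attribute, while image objects exposing get_data() (for example nibabel images) fall back to the data array; the nibabel call and file path are assumptions and are left commented out.
import numpy as np

vol = np.zeros((91, 109, 91))
print(get_shape(vol))          # -> (91, 109, 91)

# import nibabel as nib
# img = nib.load('subject01_T1.nii.gz')   # hypothetical path
# print(get_shape(img))
|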
def getPartitionId(self, i):
if (i < 0) or (i >= self._numPatterns):
raise RuntimeError("index out of bounds")
partitionId = self._partitionIdList[i]
if partitionId == numpy.inf:
return None
else:
return partitionId
|
Gets the partition id given an index.
:param i: index of partition
:returns: the partition id associated with pattern i. Returns None if no id
is associated with it.
|
def get_all(self, include_archived=False):
return [conv for conv in self._conv_dict.values()
if not conv.is_archived or include_archived]
|
Get all the conversations.
Args:
include_archived (bool): (optional) Whether to include archived
conversations. Defaults to ``False``.
Returns:
List of all :class:`.Conversation` objects.
|
def add_tweets(self, url, last_modified, tweets):
try:
self.cache[url] = {"last_modified": last_modified, "tweets": tweets}
self.mark_updated()
return True
except TypeError:
return False
|
Adds new tweets to the cache.
|
def read_firmware(self):
self.cnxn.xfer([0x12])
sleep(10e-3)
self.firmware['major'] = self.cnxn.xfer([0x00])[0]
self.firmware['minor'] = self.cnxn.xfer([0x00])[0]
self.firmware['version'] = float('{}.{}'.format(self.firmware['major'], self.firmware['minor']))
sleep(0.1)
return self.firmware
|
Read the firmware version of the OPC-N2. Firmware v18+ only.
:rtype: dict
:Example:
>>> alpha.read_firmware()
{
'major': 18,
'minor': 2,
'version': 18.2
}
|
def cmd():
if platform == 'win':
return ['cmd.exe', '/K']
elif platform == 'linux':
ppid = os.getppid()
ppid_cmdline_file = '/proc/{0}/cmdline'.format(ppid)
try:
with open(ppid_cmdline_file) as f:
cmd = f.read()
if cmd.endswith('\x00'):
cmd = cmd[:-1]
cmd = cmd.split('\x00')
return cmd + [binpath('subshell.sh')]
        except Exception:
            # Fall back to plain bash if /proc/<ppid>/cmdline is unreadable.
            cmd = 'bash'
else:
cmd = 'bash'
return [cmd, binpath('subshell.sh')]
|
Return a command to launch a subshell
|
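A hedged usage sketch: launch the returned command as a child process; `binpath` and `platform` are assumed to be defined at module level as in the snippet above.
import subprocess

proc = subprocess.Popen(cmd())   # opens the platform-appropriate subshell
proc.wait()
|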
def anomalyGetLabels(self, start, end):
return self._getAnomalyClassifier().getSelf().getLabels(start, end)
|
Get labels from the anomaly classifier within this model.
:param start: (int) index to start getting labels
:param end: (int) index to end getting labels
|
def boilerplate(name, contact, description, pmids, version, copyright, authors, licenses, disclaimer, output):
from .document_utils import write_boilerplate
write_boilerplate(
name=name,
version=version,
description=description,
authors=authors,
contact=contact,
copyright=copyright,
licenses=licenses,
disclaimer=disclaimer,
pmids=pmids,
file=output,
)
|
Build a template BEL document with the given PubMed identifiers.
|
def _add_notice_to_docstring(doc, no_doc_str, notice):
if not doc:
lines = [no_doc_str]
else:
lines = _normalize_docstring(doc).splitlines()
notice = [''] + notice
if len(lines) > 1:
if lines[1].strip():
notice.append('')
lines[1:1] = notice
else:
lines += notice
return '\n'.join(lines)
|
Adds a deprecation notice to a docstring.
|
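A minimal sketch of attaching a deprecation note to an existing function's docstring; the function, notice wording, and replacement name are assumptions.
def old_api(x):
    """Compute a value the old way."""
    return x * 2

old_api.__doc__ = _add_notice_to_docstring(
    old_api.__doc__,
    no_doc_str='DEPRECATED',
    notice=['DEPRECATED: use new_api() instead.'],
)
print(old_api.__doc__)
|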
def find_top_dataset(self, dataset_name=None, sort=None, **kwargs):
self._fill_project_info(kwargs)
if dataset_name is None:
raise Exception("dataset_name is None, please give a dataset name")
kwargs.update({'dataset_name': dataset_name})
s = time.time()
d = self.db.Dataset.find_one(filter=kwargs, sort=sort)
if d is not None:
dataset_id = d['dataset_id']
else:
print("[Database] FAIL! Cannot find dataset: {}".format(kwargs))
return False
try:
dataset = self._deserialization(self.dataset_fs.get(dataset_id).read())
pc = self.db.Dataset.find(kwargs)
print("[Database] Find one dataset SUCCESS, {} took: {}s".format(kwargs, round(time.time() - s, 2)))
dataset_id_list = pc.distinct('dataset_id')
n_dataset = len(dataset_id_list)
if n_dataset != 1:
print(" Note that there are {} datasets match the requirement".format(n_dataset))
return dataset
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
return False
|
Finds and returns a dataset from the database which matches the requirement.
Parameters
----------
dataset_name : str
The name of dataset.
sort : List of tuple
    PyMongo sort specification; search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.
kwargs : other parameters
    Other matching fields, such as description, author, etc. (optional).
Examples
---------
Save dataset
>>> db.save_dataset([X_train, y_train, X_test, y_test], 'mnist', description='this is a tutorial')
Get dataset
>>> dataset = db.find_top_dataset('mnist')
>>> datasets = db.find_datasets('mnist')
Returns
--------
dataset : the dataset or False
Return False if nothing found.
|
def get_credential(self, service, username):
if username is not None:
password = self.get_password(service, username)
if password is not None:
return credentials.SimpleCredential(
username,
password,
)
return None
|
Gets the username and password for the service.
Returns a Credential instance.
The *username* argument is optional and may be omitted by
the caller or ignored by the backend. Callers must use the
returned username.
|
def get_server(self, key, **kwds):
kwds = dict(self.kwds, **kwds)
server = self.servers.get(key)
if server:
server.check_keywords(self.constructor, kwds)
else:
server = _CachedServer(self.constructor, key, kwds)
self.servers[key] = server
return server
|
Get a new or existing server for this key.
:param int key: key for the server to use
|
def create_query(section):
query = {}
if 'ports' in section:
query['ports'] = [section['ports']]
if 'up' in section:
query['up'] = bool(section['up'])
if 'search' in section:
query['search'] = [section['search']]
if 'tags' in section:
query['tags'] = [section['tags']]
if 'groups' in section:
query['groups'] = [section['groups']]
return query
|
Creates a search query based on the section of the config file.
|
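A short sketch of how a parsed config section maps onto the query dict; the section contents are assumptions.
section = {'ports': '22,80', 'up': 1, 'tags': 'linux'}
query = create_query(section)
print(query)   # -> {'ports': ['22,80'], 'up': True, 'tags': ['linux']}
|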
def pip(self, package_names, raise_on_error=True):
if isinstance(package_names, basestring):
package_names = [package_names]
cmd = "pip install -U %s" % (' '.join(package_names))
return self.wait(cmd, raise_on_error=raise_on_error)
|
Install specified python packages using pip. -U option added
Waits for command to finish.
Parameters
----------
package_names: list-like of str
raise_on_error: bool, default True
If True then raise ValueError if stderr is not empty
|
def _launch_all(self, launchers):
for launcher in launchers:
print("== Launching %s ==" % launcher.batch_name)
launcher()
return True
|
Launches all available launchers.
|