Dataset preview: two string columns per row — code (59 to 4.4k characters) and docstring (5 to 7.69k characters).
def set_keyring(keyring):
    global _keyring_backend
    if not isinstance(keyring, backend.KeyringBackend):
        raise TypeError("The keyring must be a subclass of KeyringBackend")
    _keyring_backend = keyring
Set current keyring backend.
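A minimal usage sketch, assuming the function above is the `set_keyring` entry point of the `keyring` package and that `backend.KeyringBackend` is its backend base class; the `fail.Keyring` backend is just an illustrative subclass:
>>> import keyring
>>> from keyring.backends import fail
>>> keyring.set_keyring(fail.Keyring())   # any KeyringBackend subclass is accepted
>>> keyring.set_keyring(object())         # anything else is rejected
Traceback (most recent call last):
    ...
TypeError: The keyring must be a subclass of KeyringBackend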
def run_program(prog_list, debug, shell):
    try:
        if not shell:
            process = Popen(prog_list, stdout=PIPE, stderr=PIPE)
            stdout, stderr = process.communicate()
            retcode = process.returncode
            if debug >= 1:
                print("Program : ", " ".join(prog_list))
                print("Return Code: ", retcode)
                print("Stdout: ", stdout)
                print("Stderr: ", stderr)
            return bool(retcode)
        else:
            # Some commands misbehave under Popen; fall back to os.system.
            command = " ".join(prog_list)
            os.system(command)
            return True
    except:
        return False
Run a program and check its return code. Note that some commands don't work well with Popen, so if this function is called with 'shell=True' it falls back to the old 'os.system', in which case no program output is captured.
def fetch_repo_creation_date(self): gh = self.github user = self.options.user repo = self.options.project rc, data = gh.repos[user][repo].get() if rc == 200: return REPO_CREATED_TAG_NAME, data["created_at"] else: self.raise_GitHubError(rc, data, gh.getheaders()) return None, None
Get the creation date of the repository from GitHub. :rtype: str, str :return: special tag name, creation date as ISO date string
def inverse_dynamics(self, angles, start=0, end=1e100, states=None, max_force=100): if states is not None: self.skeleton.set_body_states(states) for frame_no, frame in enumerate(angles): if frame_no < start: continue if frame_no >= end: break self.ode_space.collide(None, self.on_collision) states = self.skeleton.get_body_states() self.skeleton.set_body_states(states) self.skeleton.enable_motors(max_force) self.skeleton.set_target_angles(angles[frame_no]) self.ode_world.step(self.dt) torques = self.skeleton.joint_torques self.skeleton.disable_motors() self.skeleton.set_body_states(states) self.skeleton.add_torques(torques) yield torques self.ode_world.step(self.dt) self.ode_contactgroup.empty()
Follow a set of angle data, yielding dynamic joint torques. Parameters ---------- angles : ndarray (num-frames x num-dofs) Follow angle data provided by this array of angle values. start : int, optional Start following angle data after this frame. Defaults to the start of the angle data. end : int, optional Stop following angle data after this frame. Defaults to the end of the angle data. states : list of body states, optional If given, set the states of the skeleton bodies to these values before starting to follow the marker data. max_force : float, optional Allow each degree of freedom in the skeleton to exert at most this force when attempting to follow the given joint angles. Defaults to 100N. Setting this value to be large results in more accurate following but can cause oscillations in the PID controllers, resulting in noisy torques. Returns ------- torques : sequence of torque frames Returns a generator of joint torque data for the skeleton. One set of joint torques will be generated for each frame of angle data between `start` and `end`.
def recursive_glob(base_directory, regex=''): files = glob(op.join(base_directory, regex)) for path, dirlist, filelist in os.walk(base_directory): for dir_name in dirlist: files.extend(glob(op.join(path, dir_name, regex))) return files
Uses glob to find all files or folders that match the regex starting from the base_directory. Parameters ---------- base_directory: str regex: str Returns ------- files: list
def process_point_value(cls, command_type, command, index, op_type): _log.debug('Processing received point value for index {}: {}'.format(index, command))
A PointValue was received from the Master. Process its payload. :param command_type: (string) Either 'Select' or 'Operate'. :param command: A ControlRelayOutputBlock or else a wrapped data value (AnalogOutputInt16, etc.). :param index: (integer) DNP3 index of the payload's data definition. :param op_type: An OperateType, or None if command_type == 'Select'.
def add_color_to_scheme(scheme, name, foreground, background, palette_colors): if foreground is None and background is None: return scheme new_scheme = [] for item in scheme: if item[0] == name: if foreground is None: foreground = item[1] if background is None: background = item[2] if palette_colors > 16: new_scheme.append((name, '', '', '', foreground, background)) else: new_scheme.append((name, foreground, background)) else: new_scheme.append(item) return new_scheme
Add foreground and background colours to a color scheme
def array_2d_from_array_1d(self, array_1d):
    return mapping_util.map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(
        array_1d=array_1d, shape=self.mask.shape,
        one_to_two=self.mask.masked_grid_index_to_pixel)
Map a 1D array of the same dimension as the grid to its original masked 2D array. Parameters ---------- array_1d : ndarray The 1D array which is mapped to its masked 2D array.
def connect():
    ftp_class = ftplib.FTP if not SSL else ftplib.FTP_TLS
    ftp = ftp_class(timeout=TIMEOUT)
    ftp.connect(HOST, PORT)
    ftp.login(USER, PASSWORD)
    if SSL:
        ftp.prot_p()
    return ftp
Connect to FTP server, login and return an ftplib.FTP instance.
def to_table(components, topo_info): inputs, outputs = defaultdict(list), defaultdict(list) for ctype, component in components.items(): if ctype == 'bolts': for component_name, component_info in component.items(): for input_stream in component_info['inputs']: input_name = input_stream['component_name'] inputs[component_name].append(input_name) outputs[input_name].append(component_name) info = [] spouts_instance = topo_info['physical_plan']['spouts'] bolts_instance = topo_info['physical_plan']['bolts'] for ctype, component in components.items(): if ctype == "stages": continue for component_name, component_info in component.items(): row = [ctype[:-1], component_name] if ctype == 'spouts': row.append(len(spouts_instance[component_name])) else: row.append(len(bolts_instance[component_name])) row.append(','.join(inputs.get(component_name, ['-']))) row.append(','.join(outputs.get(component_name, ['-']))) info.append(row) header = ['type', 'name', 'parallelism', 'input', 'output'] return info, header
Normalize raw logical plan info into table rows and a header.
def pickle_load(path, compression=False):
    if compression:
        with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
            with myzip.open("data") as f:
                return pickle.load(f)
    else:
        with open(path, "rb") as f:
            return pickle.load(f)
Unpickle a possibly compressed pickle. Parameters ---------- path: str path to the pickled file compression: bool if True assumes that pickle was compressed when created and attempts decompression. Returns ------- obj: object the unpickled object
def get_stdout(self, workflow_id, task_id): url = '%(wf_url)s/%(wf_id)s/tasks/%(task_id)s/stdout' % { 'wf_url': self.workflows_url, 'wf_id': workflow_id, 'task_id': task_id } r = self.gbdx_connection.get(url) r.raise_for_status() return r.text
Get stdout for a particular task. Args: workflow_id (str): Workflow id. task_id (str): Task id. Returns: Stdout of the task (string).
def _getMatchingRowsNoRetries(self, tableInfo, conn, fieldsToMatch, selectFieldNames, maxRows=None): assert fieldsToMatch, repr(fieldsToMatch) assert all(k in tableInfo.dbFieldNames for k in fieldsToMatch.iterkeys()), repr(fieldsToMatch) assert selectFieldNames, repr(selectFieldNames) assert all(f in tableInfo.dbFieldNames for f in selectFieldNames), repr( selectFieldNames) matchPairs = fieldsToMatch.items() matchExpressionGen = ( p[0] + (' IS ' + {True:'TRUE', False:'FALSE'}[p[1]] if isinstance(p[1], bool) else ' IS NULL' if p[1] is None else ' IN %s' if isinstance(p[1], self._SEQUENCE_TYPES) else '=%s') for p in matchPairs) matchFieldValues = [p[1] for p in matchPairs if (not isinstance(p[1], (bool)) and p[1] is not None)] query = 'SELECT %s FROM %s WHERE (%s)' % ( ','.join(selectFieldNames), tableInfo.tableName, ' AND '.join(matchExpressionGen)) sqlParams = matchFieldValues if maxRows is not None: query += ' LIMIT %s' sqlParams.append(maxRows) conn.cursor.execute(query, sqlParams) rows = conn.cursor.fetchall() if rows: assert maxRows is None or len(rows) <= maxRows, "%d !<= %d" % ( len(rows), maxRows) assert len(rows[0]) == len(selectFieldNames), "%d != %d" % ( len(rows[0]), len(selectFieldNames)) else: rows = tuple() return rows
Return a sequence of matching rows with the requested field values from a table or empty sequence if nothing matched. tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance conn: Owned connection acquired from ConnectionFactory.get() fieldsToMatch: Dictionary of internal fieldName/value mappings that identify the desired rows. If a value is an instance of ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the operator 'IN' will be used in the corresponding SQL predicate; if the value is bool: "IS TRUE/FALSE"; if the value is None: "IS NULL"; '=' will be used for all other cases. selectFieldNames: list of fields to return, using internal field names maxRows: maximum number of rows to return; unlimited if maxRows is None retval: A sequence of matching rows, each row consisting of field values in the order of the requested field names. Empty sequence is returned when no match exists.
def clone(self):
    result = copy.copy(self)
    result._compound_mfrs = copy.deepcopy(self._compound_mfrs)
    return result
Create a complete copy of the stream. :returns: A new MaterialStream object.
def makedirs(path):
    if not os.path.isdir(path):
        os.makedirs(path)
    return path
Create directories if they do not exist, otherwise do nothing. Return path for convenience
def get_element_mfr(self, element): result = 0.0 for compound in self.material.compounds: formula = compound.split('[')[0] result += self.get_compound_mfr(compound) *\ stoich.element_mass_fraction(formula, element) return result
Determine the mass flow rate of the specified element in the stream. :returns: Mass flow rate. [kg/h]
def characters(quantity=10):
    line = map(_to_lower_alpha_only,
               ''.join(random.sample(get_dictionary('lorem_ipsum'), quantity)))
    return ''.join(line)[:quantity]
Return random characters.
def create_log_config(verbose, quiet): if verbose and quiet: raise ValueError( "Supplying both --quiet and --verbose makes no sense." ) elif verbose: level = logging.DEBUG elif quiet: level = logging.ERROR else: level = logging.INFO logger_cfg = {"handlers": ["click_handler"], "level": level} return { "version": 1, "formatters": {"click_formatter": {"format": "%(message)s"}}, "handlers": { "click_handler": { "level": level, "class": "doc2dash.__main__.ClickEchoHandler", "formatter": "click_formatter", } }, "loggers": {"doc2dash": logger_cfg, "__main__": logger_cfg}, }
We use logging's levels as an easy-to-use verbosity controller.
def _is_ndb(self): if isinstance(self._model, type): if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL): return True elif issubclass(self._model, db.Model): return False raise TypeError( 'Model class not an NDB or DB model: {0}.'.format(self._model))
Determine whether the model of the instance is an NDB model. Returns: True if the model is an NDB model, False if it is a DB model; raises TypeError if it is neither.
def convert_clip(params, w_name, scope_name, inputs, layers, weights, names): print('Converting clip ...') if params['min'] == 0: print("using ReLU({0})".format(params['max'])) layer = keras.layers.ReLU(max_value=params['max']) else: def target_layer(x, vmin=params['min'], vmax=params['max']): import tensorflow as tf return tf.clip_by_value(x, vmin, vmax) layer = keras.layers.Lambda(target_layer) layers[scope_name] = layer(layers[inputs[0]])
Convert clip operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def add_newlines(f, output, char): line_count = get_line_count(f) f = open(f, 'r+') output = open(output, 'r+') for line in range(line_count): string = f.readline() string = re.sub(char, char + '\n', string) output.write(string)
Adds line breaks after every occurrence of a given character in a file. Args: f: string, path to input file. output: string, path to output file. char: string, the character after which to insert a line break. Returns: None.
def set_training(model, mode):
    if mode is None:
        yield
        return
    old_mode = model.training
    if old_mode != mode:
        model.train(mode)
    try:
        yield
    finally:
        if old_mode != mode:
            model.train(old_mode)
A context manager to temporarily set the training mode of 'model' to 'mode', resetting it when we exit the with-block. A no-op if mode is None.
def fit(self, X, y): word_vector_transformer = WordVectorTransformer(padding='max') X = word_vector_transformer.fit_transform(X) X = LongTensor(X) self.word_vector_transformer = word_vector_transformer y_transformer = LabelEncoder() y = y_transformer.fit_transform(y) y = torch.from_numpy(y) self.y_transformer = y_transformer dataset = CategorizedDataset(X, y) dataloader = DataLoader(dataset, batch_size=self.batch_size, shuffle=True, num_workers=4) KERNEL_SIZES = self.kernel_sizes NUM_KERNEL = self.num_kernel EMBEDDING_DIM = self.embedding_dim model = TextCNN( vocab_size=word_vector_transformer.get_vocab_size(), embedding_dim=EMBEDDING_DIM, output_size=len(self.y_transformer.classes_), kernel_sizes=KERNEL_SIZES, num_kernel=NUM_KERNEL) if USE_CUDA: model = model.cuda() EPOCH = self.epoch LR = self.lr loss_function = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=LR) for epoch in range(EPOCH): losses = [] for i, data in enumerate(dataloader): X, y = data X, y = Variable(X), Variable(y) optimizer.zero_grad() model.train() output = model(X) loss = loss_function(output, y) losses.append(loss.data.tolist()[0]) loss.backward() optimizer.step() if i % 100 == 0: print("[%d/%d] mean_loss : %0.2f" % ( epoch, EPOCH, np.mean(losses))) losses = [] self.model = model
Fit KimCNNClassifier according to X, y Parameters ---------- X : list of string each item is a raw text y : list of string each item is a label
def copy_file(self, path, prefixed_path, source_storage): if prefixed_path in self.copied_files: return self.log("Skipping '%s' (already copied earlier)" % path) if not self.delete_file(path, prefixed_path, source_storage): return source_path = source_storage.path(path) if self.dry_run: self.log("Pretending to copy '%s'" % source_path, level=1) else: self.log("Copying '%s'" % source_path, level=1) with source_storage.open(path) as source_file: self.storage.save(prefixed_path, source_file) self.copied_files.append(prefixed_path)
Attempt to copy ``path`` with storage
def calculate_average_scores_on_graph( graph: BELGraph, key: Optional[str] = None, tag: Optional[str] = None, default_score: Optional[float] = None, runs: Optional[int] = None, use_tqdm: bool = False, ): subgraphs = generate_bioprocess_mechanisms(graph, key=key) scores = calculate_average_scores_on_subgraphs( subgraphs, key=key, tag=tag, default_score=default_score, runs=runs, use_tqdm=use_tqdm ) return scores
Calculate the scores over all biological processes in the sub-graph. As an implementation, it simply computes the sub-graphs then calls :func:`calculate_average_scores_on_subgraphs` as described in that function's documentation. :param graph: A BEL graph with heats already on the nodes :param key: The key in the node data dictionary representing the experimental data. Defaults to :data:`pybel_tools.constants.WEIGHT`. :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score' :param default_score: The initial score for all nodes. This number can go up or down. :param runs: The number of times to run the heat diffusion workflow. Defaults to 100. :param use_tqdm: Should there be a progress bar for runners? :return: A dictionary of {pybel node tuple: results tuple} :rtype: dict[tuple, tuple] Suggested usage with :mod:`pandas`: >>> import pandas as pd >>> from pybel_tools.analysis.heat import calculate_average_scores_on_graph >>> graph = ... # load graph and data >>> scores = calculate_average_scores_on_graph(graph) >>> pd.DataFrame.from_items(scores.items(), orient='index', columns=RESULT_LABELS)
def _readxml(self): block = re.sub(r'<(/?)s>', r'&lt;\1s&gt;', self._readblock()) try: xml = XML(block) except ParseError: xml = None return xml
Read a block and return the result as XML :return: block as xml :rtype: xml.etree.ElementTree
def merge(*args):
    ret = []
    for arg in args:
        if isinstance(arg, list) or isinstance(arg, tuple):
            ret += list(arg)
        else:
            ret.append(arg)
    return ret
Implements the 'merge' operator for merging lists.
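A short doctest-style sketch of the merge operator above; the outputs follow directly from the code (lists and tuples are flattened one level, everything else is appended as-is):
>>> merge([1, 2], 3, (4, 5))
[1, 2, 3, 4, 5]
>>> merge('ab', ['cd'])
['ab', 'cd']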
def update_query_params(uri, params):
    parts = urllib.parse.urlparse(uri)
    query_params = parse_unique_urlencoded(parts.query)
    query_params.update(params)
    new_query = urllib.parse.urlencode(query_params)
    new_parts = parts._replace(query=new_query)
    return urllib.parse.urlunparse(new_parts)
Updates a URI with new query parameters. If a given key from ``params`` is repeated in the ``uri``, then the URI will be considered invalid and an error will occur. If the URI is valid, then each value from ``params`` will replace the corresponding value in the query parameters (if it exists). Args: uri: string, A valid URI, with potential existing query parameters. params: dict, A dictionary of query parameters. Returns: The same URI but with the new query parameters added.
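A hedged usage sketch, assuming `parse_unique_urlencoded` returns an ordinary dict of the existing query parameters and that dict insertion order is preserved (Python 3.7+); the URL is hypothetical:
>>> update_query_params('http://example.com/path?a=1', {'b': '2'})
'http://example.com/path?a=1&b=2'
>>> update_query_params('http://example.com/path?a=1', {'a': '9'})
'http://example.com/path?a=9'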
def posteriori_covariance(self):
    K = GLMM.covariance(self)
    tau = self._ep._posterior.tau
    return pinv(pinv(K) + diag(1 / tau))
r""" Covariance of the estimated posteriori.
def image(self, well_row, well_column, field_row, field_column): return next((i for i in self.images if attribute(i, 'u') == well_column and attribute(i, 'v') == well_row and attribute(i, 'x') == field_column and attribute(i, 'y') == field_row), '')
Get path of specified image. Parameters ---------- well_row : int Starts at 0. Same as --U in files. well_column : int Starts at 0. Same as --V in files. field_row : int Starts at 0. Same as --Y in files. field_column : int Starts at 0. Same as --X in files. Returns ------- string Path to image or empty string if image is not found.
def j2(x):
    to_return = 2. / (x + 1e-15) * j1(x) - j0(x)
    to_return[x == 0] = 0
    return to_return
A fast j2 defined in terms of other special functions
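A quick check of the recurrence used above (J2(x) = 2 J1(x)/x - J0(x)) against SciPy's general Bessel function; this assumes `j0`/`j1` come from scipy.special and that x is a NumPy array:
>>> import numpy as np
>>> from scipy.special import jv
>>> x = np.array([0.0, 1.0, 2.5])
>>> np.allclose(j2(x), jv(2, x))
True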
def getInstanceJstack(self, topology_info, instance_id): pid_response = yield getInstancePid(topology_info, instance_id) try: http_client = tornado.httpclient.AsyncHTTPClient() pid_json = json.loads(pid_response) pid = pid_json['stdout'].strip() if pid == '': raise Exception('Failed to get pid') endpoint = utils.make_shell_endpoint(topology_info, instance_id) url = "%s/jstack/%s" % (endpoint, pid) response = yield http_client.fetch(url) Log.debug("HTTP call for url: %s", url) raise tornado.gen.Return(response.body) except tornado.httpclient.HTTPError as e: raise Exception(str(e))
Fetches Instance jstack from heron-shell.
def tsms(when, tz=None):
    if not when:
        return None
    when = totz(when, tz)
    return calendar.timegm(when.timetuple()) * 1000 + int(round(when.microsecond / 1000.0))
Return a Unix timestamp in milliseconds for the provided datetime. The `totz` function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided.
def bitsToString(arr):
    s = array('c', '.' * len(arr))
    for i in xrange(len(arr)):
        if arr[i] == 1:
            s[i] = '*'
    return s
Returns a character array representing a numpy array of 0's and 1's, with '*' for 1 and '.' for 0.
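A doctest-style sketch; note this is Python 2 code (it relies on `xrange` and the `'c'` typecode of the `array` module):
>>> bitsToString([0, 1, 1, 0, 1])
array('c', '.**.*')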
def assign_params(sess, params, network): ops = [] for idx, param in enumerate(params): ops.append(network.all_params[idx].assign(param)) if sess is not None: sess.run(ops) return ops
Assign the given parameters to the TensorLayer network. Parameters ---------- sess : Session TensorFlow Session. params : list of array A list of parameters (array) in order. network : :class:`Layer` The network to be assigned. Returns -------- list of operations A list of tf ops in order that assign params. Support sess.run(ops) manually. Examples -------- - See ``tl.files.save_npz`` References ---------- - `Assign value to a TensorFlow variable <http://stackoverflow.com/questions/34220532/how-to-assign-value-to-a-tensorflow-variable>`__
def count(self): xml = get_changeset(self.id) actions = [action.tag for action in xml.getchildren()] self.create = actions.count('create') self.modify = actions.count('modify') self.delete = actions.count('delete') self.verify_editor() try: if (self.create / len(actions) > self.percentage and self.create > self.create_threshold and (self.powerfull_editor or self.create > self.top_threshold)): self.label_suspicious('possible import') elif (self.modify / len(actions) > self.percentage and self.modify > self.modify_threshold): self.label_suspicious('mass modification') elif ((self.delete / len(actions) > self.percentage and self.delete > self.delete_threshold) or self.delete > self.top_threshold): self.label_suspicious('mass deletion') except ZeroDivisionError: print('It seems this changeset was redacted')
Count the number of elements created, modified and deleted by the changeset and analyse whether it is a possible import, mass modification or a mass deletion.
def bounding_box(self): min_x, min_y, max_x, max_y = zip(*list(self.walk_rows( lambda row: row.bounding_box))) return min(min_x), min(min_y), max(max_x), max(max_y)
The minimum and maximum bounds of this layout. :return: ``(min_x, min_y, max_x, max_y)`` the bounding box of this layout :rtype: tuple
def swish(x, name='swish'):
    with tf.name_scope(name):
        x = tf.nn.sigmoid(x) * x
    return x
Swish function. See `Swish: a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941>`__. Parameters ---------- x : Tensor input. name: str function name (optional). Returns ------- Tensor A ``Tensor`` in the same type as ``x``.
def transform_title(self, content_metadata_item): title_with_locales = [] for locale in self.enterprise_configuration.get_locales(): title_with_locales.append({ 'locale': locale, 'value': content_metadata_item.get('title', '') }) return title_with_locales
Return the title of the content item for each configured locale.
def annotate_metadata_data(repo, task, patterns=["*"], size=0): mgr = plugins_get_mgr() keys = mgr.search('representation')['representation'] representations = [mgr.get_by_key('representation', k) for k in keys] matching_files = repo.find_matching_files(patterns) package = repo.package rootdir = repo.rootdir files = package['resources'] for f in files: relativepath = f['relativepath'] if relativepath in matching_files: path = os.path.join(rootdir, relativepath) if task == 'preview': print("Adding preview for ", relativepath) f['content'] = open(path).read()[:size] elif task == 'schema': for r in representations: if r.can_process(path): print("Adding schema for ", path) f['schema'] = r.get_schema(path) break
Update metadata with the content of the files
def ast2str(expr, level=0, names=None): if isinstance(expr, Expression): return ast2str(expr.body, 0, names) \ if hasattr(expr, "body") else "" elif isinstance(expr, Name): return names.get(expr.id, expr.id) if names else expr.id elif isinstance(expr, BoolOp): op = expr.op if isinstance(op, Or): str_exp = " or ".join(ast2str(i, level + 1, names) for i in expr.values) elif isinstance(op, And): str_exp = " and ".join(ast2str(i, level + 1, names) for i in expr.values) else: raise TypeError("unsupported operation " + op.__class__.__name) return "(" + str_exp + ")" if level else str_exp elif expr is None: return "" else: raise TypeError("unsupported operation " + repr(expr))
Convert compiled ast to gene_reaction_rule str Parameters ---------- expr : str string for a gene reaction rule, e.g "a and b" level : int internal use only names : dict Dict where each element is a gene identifier and the value is the gene name. Use this to get a rule str which uses names instead. This should be done for display purposes only. All gene_reaction_rule strings which are computed with this function should use the id. Returns ------- string The gene reaction rule
def multitype_sort(a):
    types = defaultdict(list)
    numbers = {int, float, complex}

    # Group items by type, lumping all numeric types together.
    for x in a:
        t = type(x)
        if t in numbers:
            types['number'].append(x)
        else:
            types[t].append(x)

    # Sort within each group, then concatenate the groups.
    for t in types:
        types[t] = np.sort(types[t])

    return list(chain(*(types[t] for t in types)))
Sort elements of multiple types. `a` is assumed to contain elements of different types, such that a plain sort would raise a `TypeError`. Parameters ---------- a : array-like Array of items to be sorted Returns ------- out : list Items sorted within their type groups.
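A small sketch of the grouping behaviour described above, assuming the function is imported with its numpy/itertools dependencies and run on Python 3.7+ (so dict insertion order decides group order); the exact scalar types in the output are numpy types, so the result is shown as a comment rather than a strict doctest:
mixed = [3, 'b', 1.5, 'a', 2]
multitype_sort(mixed)
# -> [1.5, 2.0, 3.0, 'a', 'b']  (numbers sorted together, then strings sorted together)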
def dist(self, src, tar): if src == tar: return 0.0 src_comp = self._rle.encode(self._bwt.encode(src)) tar_comp = self._rle.encode(self._bwt.encode(tar)) concat_comp = self._rle.encode(self._bwt.encode(src + tar)) concat_comp2 = self._rle.encode(self._bwt.encode(tar + src)) return ( min(len(concat_comp), len(concat_comp2)) - min(len(src_comp), len(tar_comp)) ) / max(len(src_comp), len(tar_comp))
Return the NCD between two strings using BWT plus RLE. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Compression distance Examples -------- >>> cmp = NCDbwtrle() >>> cmp.dist('cat', 'hat') 0.75 >>> cmp.dist('Niall', 'Neil') 0.8333333333333334 >>> cmp.dist('aluminum', 'Catalan') 1.0 >>> cmp.dist('ATCG', 'TAGC') 0.8
def _ensure_managed_repos_dir_exists():
    if not os.path.exists(constants.REPOS_DIR):
        os.makedirs(constants.REPOS_DIR)
Our exports file will be invalid if this folder doesn't exist, and the NFS server will not run correctly.
def convert_lsstdoc_tex( content, to_fmt, deparagraph=False, mathjax=False, smart=True, extra_args=None): augmented_content = '\n'.join((LSSTDOC_MACROS, content)) return convert_text( augmented_content, 'latex', to_fmt, deparagraph=deparagraph, mathjax=mathjax, smart=smart, extra_args=extra_args)
Convert lsstdoc-class LaTeX to another markup format. This function is a thin wrapper around `convert_text` that automatically includes common lsstdoc LaTeX macros. Parameters ---------- content : `str` Original content. to_fmt : `str` Output format for the content (see https://pandoc.org/MANUAL.html). For example, 'html5'. deparagraph : `bool`, optional If `True`, then the `lsstprojectmeta.pandoc.filters.deparagraph.deparagraph` filter is used to remove paragraph (``<p>``, for example) tags around a single paragraph of content. That filter does not affect content that consists of multiple blocks (several paragraphs, or lists, for example). Default is `False`. For example, **without** this filter Pandoc will convert the string ``"Title text"`` to ``"<p>Title text</p>"`` in HTML. The paragraph tags aren't useful if you intend to wrap the converted content in different tags, like ``<h1>``, using your own templating system. **With** this filter, Pandoc will convert the string ``"Title text"`` to ``"Title text"`` in HTML. mathjax : `bool`, optional If `True` then Pandoc will markup output content to work with MathJax. Default is False. smart : `bool`, optional If `True` (default) then ascii characters will be converted to unicode characters like smart quotes and em dashes. extra_args : `list`, optional Sequence of Pandoc arguments command line arguments (such as ``'--normalize'``). The ``deparagraph``, ``mathjax``, and ``smart`` arguments are convenience arguments that are equivalent to items in ``extra_args``. Returns ------- output : `str` Content in the output (``to_fmt``) format. Notes ----- This function will automatically install Pandoc if it is not available. See `ensure_pandoc`.
async def manage(self): cm = _ContextManager(self.database) if isinstance(self.database.obj, AIODatabase): cm.connection = await self.database.async_connect() else: cm.connection = self.database.connect() return cm
Manage a database connection.
def extract_features_and_generate_model(essays, algorithm=util_functions.AlgorithmTypes.regression): f = feature_extractor.FeatureExtractor() f.initialize_dictionaries(essays) train_feats = f.gen_feats(essays) set_score = numpy.asarray(essays._score, dtype=numpy.int) if len(util_functions.f7(list(set_score)))>5: algorithm = util_functions.AlgorithmTypes.regression else: algorithm = util_functions.AlgorithmTypes.classification clf,clf2 = get_algorithms(algorithm) cv_error_results=get_cv_error(clf2,train_feats,essays._score) try: clf.fit(train_feats, set_score) except ValueError: log.exception("Not enough classes (0,1,etc) in sample.") set_score[0]=1 set_score[1]=0 clf.fit(train_feats, set_score) return f, clf, cv_error_results
Feed in an essay set to get a feature vector and classifier. essays must be an essay set object. An additional array is an optional argument that can specify a numpy array of values to add in. Returns a trained FeatureExtractor object and a trained classifier.
def pick_coda_from_decimal(decimal): decimal = Decimal(decimal) __, digits, exp = decimal.as_tuple() if exp < 0: return DIGIT_CODAS[digits[-1]] __, digits, exp = decimal.normalize().as_tuple() index = bisect_right(EXP_INDICES, exp) - 1 if index < 0: return DIGIT_CODAS[digits[-1]] else: return EXP_CODAS[EXP_INDICES[index]]
Picks only a coda from a decimal.
def tool_factory(clsname, name, driver, base=GromacsCommand): clsdict = { 'command_name': name, 'driver': driver, '__doc__': property(base._get_gmx_docs) } return type(clsname, (base,), clsdict)
Factory for GromacsCommand derived types.
def get_extr_lics_xref(self, extr_lic):
    xrefs = list(self.graph.triples((extr_lic, RDFS.seeAlso, None)))
    return map(lambda xref_triple: xref_triple[2], xrefs)
Return a list of cross references.
def norm_remote_path(path):
    path = os.path.normpath(path)
    if path.startswith(os.path.sep):
        return path[1:]
    else:
        return path
Normalize `path`. All remote paths are absolute.
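A doctest-style sketch of the normalization above (POSIX path separator assumed):
>>> norm_remote_path('/foo//bar/../baz')
'foo/baz'
>>> norm_remote_path('foo/bar')
'foo/bar'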
def save_component(self, component_name, save_path): component = self.get_component(component_name=component_name) self._validate_savable(component=component, component_name=component_name) return component.save(sess=self.session, save_path=save_path)
Saves a component of this model to the designated location. Args: component_name: The component to save. save_path: The location to save to. Returns: Checkpoint path where the component was saved.
def dsa_sign(private_key, data, hash_algorithm):
    if private_key.algorithm != 'dsa':
        raise ValueError('The key specified is not a DSA private key')
    return _sign(private_key, data, hash_algorithm)
Generates a DSA signature :param private_key: The PrivateKey to generate the signature with :param data: A byte string of the data the signature is for :param hash_algorithm: A unicode string of "md5", "sha1", "sha256", "sha384" or "sha512" :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the signature
def generateStats(filename, maxSamples = None,): statsCollectorMapping = {'float': FloatStatsCollector, 'int': IntStatsCollector, 'string': StringStatsCollector, 'datetime': DateTimeStatsCollector, 'bool': BoolStatsCollector, } filename = resource_filename("nupic.datafiles", filename) print "*"*40 print "Collecting statistics for file:'%s'" % (filename,) dataFile = FileRecordStream(filename) statsCollectors = [] for fieldName, fieldType, fieldSpecial in dataFile.getFields(): statsCollector = \ statsCollectorMapping[fieldType](fieldName, fieldType, fieldSpecial) statsCollectors.append(statsCollector) if maxSamples is None: maxSamples = 500000 for i in xrange(maxSamples): record = dataFile.getNextRecord() if record is None: break for i, value in enumerate(record): statsCollectors[i].addValue(value) stats = {} for statsCollector in statsCollectors: statsCollector.getStats(stats) if dataFile.getResetFieldIdx() is not None: resetFieldName,_,_ = dataFile.getFields()[dataFile.reset] stats.pop(resetFieldName) if VERBOSITY > 0: pprint.pprint(stats) return stats
Collect statistics for each of the fields in the user input data file and return a stats dict object. Parameters: ------------------------------------------------------------------------------ filename: The path and name of the data file. maxSamples: Upper bound on the number of rows to be processed retval: A dictionary of dictionaries. The top level keys are the field names and the corresponding values are the statistics collected for the individual field. Example: { 'consumption':{'min':0,'max':90,'mean':50,...}, 'gym':{'numDistinctCategories':10,...}, ... }
def add_condor_job(self, token, batchmaketaskid, jobdefinitionfilename, outputfilename, errorfilename, logfilename, postfilename): parameters = dict() parameters['token'] = token parameters['batchmaketaskid'] = batchmaketaskid parameters['jobdefinitionfilename'] = jobdefinitionfilename parameters['outputfilename'] = outputfilename parameters['errorfilename'] = errorfilename parameters['logfilename'] = logfilename parameters['postfilename'] = postfilename response = self.request('midas.batchmake.add.condor.job', parameters) return response
Add a Condor DAG job to the Condor DAG associated with this Batchmake task :param token: A valid token for the user in question. :type token: string :param batchmaketaskid: id of the Batchmake task for this DAG :type batchmaketaskid: int | long :param jobdefinitionfilename: Filename of the definition file for the job :type jobdefinitionfilename: string :param outputfilename: Filename of the output file for the job :type outputfilename: string :param errorfilename: Filename of the error file for the job :type errorfilename: string :param logfilename: Filename of the log file for the job :type logfilename: string :param postfilename: Filename of the post script log file for the job :type postfilename: string :return: The created Condor job DAO. :rtype: dict
def export(self, model_name, export_folder): for transformer in self.transformers: if isinstance(transformer, MultiLabelBinarizer): joblib.dump(transformer, join(export_folder, "label.transformer.bin"), protocol=2) if isinstance(transformer, TfidfVectorizer): joblib.dump(transformer, join(export_folder, "tfidf.transformer.bin"), protocol=2) if isinstance(transformer, CountVectorizer): joblib.dump(transformer, join(export_folder, "count.transformer.bin"), protocol=2) if isinstance(transformer, NumberRemover): joblib.dump(transformer, join(export_folder, "number.transformer.bin"), protocol=2) model = [model for model in self.models if model.name == model_name][0] e = Experiment(self.X, self.y, model.estimator, None) model_filename = join(export_folder, "model.bin") e.export(model_filename)
Export model and transformers to export_folder Parameters ---------- model_name: string name of model to export export_folder: string folder to store exported model and transformers
def clear(self, *args, **kwargs): super(Deposit, self).clear(*args, **kwargs)
Clear only drafts. Status required: ``'draft'``. Meta information inside `_deposit` is preserved.
def pitch_contour(annotation, **kwargs): ax = kwargs.pop('ax', None) ax = mir_eval.display.__get_axes(ax=ax)[0] times, values = annotation.to_interval_values() indices = np.unique([v['index'] for v in values]) for idx in indices: rows = [i for (i, v) in enumerate(values) if v['index'] == idx] freqs = np.asarray([values[r]['frequency'] for r in rows]) unvoiced = ~np.asarray([values[r]['voiced'] for r in rows]) freqs[unvoiced] *= -1 ax = mir_eval.display.pitch(times[rows, 0], freqs, unvoiced=True, ax=ax, **kwargs) return ax
Plotting wrapper for pitch contours
def encode(self, o): if isinstance(o, basestring): if isinstance(o, str): _encoding = self.encoding if (_encoding is not None and not (_encoding == 'utf-8')): o = o.decode(_encoding) if self.ensure_ascii: return encode_basestring_ascii(o) else: return encode_basestring(o) chunks = list(self.iterencode(o)) return ''.join(chunks)
Return a JSON string representation of a Python data structure. >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) '{"foo": ["bar", "baz"]}'
def child(self, offset256): a = bytes(self.pubkey) + offset256 s = hashlib.sha256(a).digest() return self.derive_from_seed(s)
Derive new private key from this key and a sha256 "offset"
def pop_data(self):
    data = self.data
    self.data = SortedKeyList(key=self._key)
    return data
Replace this observation's data with a fresh container. Returns ------- annotation_data : SortedKeyList The original annotation data container
def _isCheckpointDir(checkpointDir):
    lastSegment = os.path.split(checkpointDir)[1]
    if lastSegment[0] == '.':
        return False
    if not checkpointDir.endswith(g_defaultCheckpointExtension):
        return False
    if not os.path.isdir(checkpointDir):
        return False
    return True
Return true iff checkpointDir appears to be a checkpoint directory.
def from_participantid(participant_id):
    return user.UserID(
        chat_id=participant_id.chat_id,
        gaia_id=participant_id.gaia_id
    )
Convert hangouts_pb2.ParticipantId to UserID.
def draw_freehand(self): if _ctx._ns["mousedown"]: x, y = mouse() if self.show_grid: x, y = self.grid.snap(x, y) if self.freehand_move == True: cmd = MOVETO self.freehand_move = False else: cmd = LINETO pt = PathElement() if cmd != MOVETO: pt.freehand = True else: pt.freehand = False pt.cmd = cmd pt.x = x pt.y = y pt.ctrl1 = Point(x,y) pt.ctrl2 = Point(x,y) self._points.append(pt) r = 4 _ctx.nofill() _ctx.stroke(self.handle_color) _ctx.oval(pt.x-r, pt.y-r, r*2, r*2) _ctx.fontsize(9) _ctx.fill(self.handle_color) _ctx.text(" ("+str(int(pt.x))+", "+str(int(pt.y))+")", pt.x+r, pt.y) self._dirty = True else: self.freehand_move = True if self._dirty: self._points[-1].freehand = False self.export_svg() self._dirty = False
Freehand sketching.
def get_user_by_email(self, email): parameters = dict() parameters['email'] = email response = self.request('midas.user.get', parameters) return response
Get a user by the email of that user. :param email: The email of the desired user. :type email: string :returns: The user requested. :rtype: dict
def _stack_positions(positions, pos_in_dollars=True): if pos_in_dollars: positions = get_percent_alloc(positions) positions = positions.drop('cash', axis='columns') positions = positions.stack() positions.index = positions.index.set_names(['dt', 'ticker']) return positions
Convert positions to percentages if necessary, and change them to long format. Parameters ---------- positions: pd.DataFrame Daily holdings (in dollars or percentages), indexed by date. Will be converted to percentages if positions are in dollars. Short positions show up as cash in the 'cash' column. pos_in_dollars : bool Flag indicating whether `positions` are in dollars or percentages If True, positions are in dollars.
def app_class(): try: pkg_resources.get_distribution('invenio-files-rest') from invenio_files_rest.app import Flask as FlaskBase except pkg_resources.DistributionNotFound: from flask import Flask as FlaskBase class Request(TrustedHostsMixin, FlaskBase.request_class): pass class Flask(FlaskBase): request_class = Request return Flask
Create Flask application class. Invenio-Files-REST needs to patch the Werkzeug form parsing in order to support streaming large file uploads. This is done by subclassing the Flask application class.
def write_json(self, fh, pretty=True): sjson = json.JSONEncoder().encode(self.json()) if pretty: json.dump(json.loads(sjson), fh, sort_keys=True, indent=4) else: json.dump(json.loads(sjson), fh) return
Write composite object to file handle in JSON format. Args: fh (file): File handle to write to. pretty (bool): Sort keys and indent in output.
def graph_background(s): if s.background == None: s._ctx.background(None) else: s._ctx.background(s.background) if s.depth: try: clr = colors.color(s.background).darker(0.2) p = s._ctx.rect(0, 0, s._ctx.WIDTH, s._ctx.HEIGHT, draw=False) colors.gradientfill(p, clr, clr.lighter(0.35)) colors.shadow(dx=0, dy=0, blur=2, alpha=0.935, clr=s.background) except: pass
Graph background color.
def md_dimension_info(name, node): def _get_value(child_name): return getattr(node.find(child_name), 'text', None) resolution = _get_value('resolution') defaultValue = node.find("defaultValue") strategy = defaultValue.find("strategy") if defaultValue is not None else None strategy = strategy.text if strategy is not None else None return DimensionInfo( name, _get_value('enabled') == 'true', _get_value('presentation'), int(resolution) if resolution else None, _get_value('units'), _get_value('unitSymbol'), strategy, _get_value('attribute'), _get_value('endAttribute'), _get_value('referenceValue'), _get_value('nearestMatchEnabled') )
Extract metadata Dimension Info from an xml node
def fulladder_gate(variables, vartype=dimod.BINARY, name='FULL_ADDER'): variables = tuple(variables) if vartype is dimod.BINARY: configs = frozenset([(0, 0, 0, 0, 0), (0, 0, 1, 1, 0), (0, 1, 0, 1, 0), (0, 1, 1, 0, 1), (1, 0, 0, 1, 0), (1, 0, 1, 0, 1), (1, 1, 0, 0, 1), (1, 1, 1, 1, 1)]) else: configs = frozenset([(-1, -1, -1, -1, -1), (-1, -1, +1, +1, -1), (-1, +1, -1, +1, -1), (-1, +1, +1, -1, +1), (+1, -1, -1, +1, -1), (+1, -1, +1, -1, +1), (+1, +1, -1, -1, +1), (+1, +1, +1, +1, +1)]) def func(in1, in2, in3, sum_, carry): total = (in1 > 0) + (in2 > 0) + (in3 > 0) if total == 0: return (sum_ <= 0) and (carry <= 0) elif total == 1: return (sum_ > 0) and (carry <= 0) elif total == 2: return (sum_ <= 0) and (carry > 0) elif total == 3: return (sum_ > 0) and (carry > 0) else: raise ValueError("func recieved unexpected values") return Constraint(func, configs, variables, vartype=vartype, name=name)
Full adder. Args: variables (list): Variable labels for the and gate as `[in1, in2, in3, sum, carry]`, where `in1, in2, in3` are inputs to be added and `sum` and 'carry' the resultant outputs. vartype (Vartype, optional, default='BINARY'): Variable type. Accepted input values: * Vartype.SPIN, 'SPIN', {-1, 1} * Vartype.BINARY, 'BINARY', {0, 1} name (str, optional, default='FULL_ADDER'): Name for the constraint. Returns: Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are assigned values that match the valid states of a Boolean full adder. Examples: >>> import dwavebinarycsp >>> import dwavebinarycsp.factories.constraint.gates as gates >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY) >>> csp.add_constraint(gates.fulladder_gate(['a', 'b', 'c_in', 'total', 'c_out'], name='FA1')) >>> csp.check({'a': 1, 'b': 0, 'c_in': 1, 'total': 0, 'c_out': 1}) True
def plot_grid(grid_arcsec, array, units, kpc_per_arcsec, pointsize, zoom_offset_arcsec): if grid_arcsec is not None: if zoom_offset_arcsec is not None: grid_arcsec -= zoom_offset_arcsec grid_units = convert_grid_units(grid_arcsec=grid_arcsec, array=array, units=units, kpc_per_arcsec=kpc_per_arcsec) plt.scatter(y=np.asarray(grid_units[:, 0]), x=np.asarray(grid_units[:, 1]), s=pointsize, c='k')
Plot a grid of points over the array of data on the figure. Parameters ---------- grid_arcsec : ndarray or data.array.grids.RegularGrid A grid of (y,x) coordinates in arc-seconds which may be plotted over the array. array : data.array.scaled_array.ScaledArray The 2D array of data which is plotted. units : str The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc'). kpc_per_arcsec : float or None The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc. pointsize : int The size of the points plotted to show the grid.
def _handleModelRunnerException(jobID, modelID, jobsDAO, experimentDir, logger, e): msg = StringIO.StringIO() print >>msg, "Exception occurred while running model %s: %r (%s)" % ( modelID, e, type(e)) traceback.print_exc(None, msg) completionReason = jobsDAO.CMPL_REASON_ERROR completionMsg = msg.getvalue() logger.error(completionMsg) if type(e) is not InvalidConnectionException: jobsDAO.modelUpdateResults(modelID, results=None, numRecords=0) if type(e) == JobFailException: workerCmpReason = jobsDAO.jobGetFields(jobID, ['workerCompletionReason'])[0] if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS: jobsDAO.jobSetFields(jobID, fields=dict( cancel=True, workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR, workerCompletionMsg = ": ".join(str(i) for i in e.args)), useConnectionID=False, ignoreUnchanged=True) return (completionReason, completionMsg)
Perform standard handling of an exception that occurs while running a model. Parameters: ------------------------------------------------------------------------- jobID: ID for this hypersearch job in the jobs table modelID: model ID jobsDAO: ClientJobsDAO instance experimentDir: directory containing the experiment logger: the logger to use e: the exception that occurred retval: (completionReason, completionMsg)
def included(self, path, is_dir=False): inclusive = None for pattern in self.patterns: if pattern.is_dir == is_dir and pattern.matches(path): inclusive = pattern.inclusive return inclusive
Check patterns in order, last match that includes or excludes `path` wins. Return `None` on undecided.
def upload(self, filename, location=''): current_folder = self._ftp.pwd() self.mkdir(location) self.cd(location) fl = open(filename, 'rb') filename = filename.split('/')[-1] self._ftp.storbinary('STOR %s' % filename, fl) fl.close() self.cd(current_folder)
Uploads a file on the server to the desired location :param filename: the name of the file to be uploaded. :type filename: string :param location: the directory in which the file will be stored. :type location: string
def get_configured_dns():
    ips = []
    try:
        output = subprocess.check_output(['nmcli', 'device', 'show'])
        output = output.decode('utf-8')
        for line in output.split('\n'):
            if 'DNS' in line:
                # Pull every IPv4 address out of the DNS lines.
                pattern = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
                for hit in re.findall(pattern, line):
                    ips.append(hit)
    except FileNotFoundError:
        pass
    return ips
Returns the configured DNS servers with the use of nmcli.
def calc_intent(self, query): matches = self.calc_intents(query) if len(matches) == 0: return MatchData('', '') best_match = max(matches, key=lambda x: x.conf) best_matches = (match for match in matches if match.conf == best_match.conf) return min(best_matches, key=lambda x: sum(map(len, x.matches.values())))
Tests all the intents against the query and returns match data of the best intent Args: query (str): Input sentence to test against intents Returns: MatchData: Best intent match
def FlagCxx14Features(filename, clean_lines, linenum, error):
    line = clean_lines.elided[linenum]

    include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[>"]', line)

    # Flag unapproved C++14 headers.
    if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
        error(filename, linenum, 'build/c++14', 5,
              ('<%s> is an unapproved C++14 header.') % include.group(1))
Flag those C++14 features that we restrict. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def _cleanup(self):
    self.exit()
    workspace = osp.join(os.getcwd(), 'octave-workspace')
    if osp.exists(workspace):
        os.remove(workspace)
Clean up resources used by the session.
def _GetNextLogCountPerToken(token):
    global _log_counter_per_token
    _log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)
    return _log_counter_per_token[token]
Wrapper for _log_counter_per_token. Args: token: The token for which to look up the count. Returns: The number of times this function has been called with *token* as an argument (starting at 0)
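A doctest-style sketch, assuming the module-level `_log_counter_per_token` dict starts out empty:
>>> _GetNextLogCountPerToken('spam')
0
>>> _GetNextLogCountPerToken('spam')
1
>>> _GetNextLogCountPerToken('eggs')
0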
def write(self, chunk, serialize=False, format=None): self.require_not_closed() if chunk is None: return if serialize or format is not None: self.serialize(chunk, format=format) return if type(chunk) is six.binary_type: self._length += len(chunk) self._stream.write(chunk) elif isinstance(chunk, six.string_types): encoding = self.encoding if encoding is not None: chunk = chunk.encode(encoding) else: raise exceptions.InvalidOperation( 'Attempting to write textual data without an encoding.') self._length += len(chunk) self._stream.write(chunk) elif isinstance(chunk, collections.Iterable): for section in chunk: self.write(section) else: raise exceptions.InvalidOperation( 'Attempting to write something not recognized.')
Writes the given chunk to the output buffer. @param[in] chunk Either a byte array, a unicode string, or a generator. If `chunk` is a generator then calling `self.write(<generator>)` is equivalent to: @code for x in <generator>: self.write(x) self.flush() @endcode @param[in] serialize True to serialize the lines in a determined serializer. @param[in] format A specific format to serialize in; if provided, no detection is done. If not provided, the accept header (as well as the URL extension) is looked at to determine an appropriate serializer.
def grompp_qtot(*args, **kwargs): qtot_pattern = re.compile('System has non-zero total charge: *(?P<qtot>[-+]?\d*\.\d+([eE][-+]\d+)?)') kwargs['stdout'] = False kwargs['stderr'] = False rc, output, error = grompp_warnonly(*args, **kwargs) gmxoutput = "\n".join([x for x in [output, error] if x is not None]) if rc != 0: msg = "grompp_qtot() failed. See warning and screen output for clues." logger.error(msg) import sys sys.stderr.write("=========== grompp (stdout/stderr) ============\n") sys.stderr.write(gmxoutput) sys.stderr.write("===============================================\n") sys.stderr.flush() raise GromacsError(rc, msg) qtot = 0 for line in gmxoutput.split('\n'): m = qtot_pattern.search(line) if m: qtot = float(m.group('qtot')) break logger.info("system total charge qtot = {qtot!r}".format(**vars())) return qtot
Run ``gromacs.grompp`` and return the total charge of the system. :Arguments: The arguments are the ones one would pass to :func:`gromacs.grompp`. :Returns: The total charge as reported by :program:`grompp`. Some things to keep in mind: * The stdout output of grompp is only shown when an error occurs. For debugging, look at the log file or screen output and try running the normal :func:`gromacs.grompp` command and analyze the output if the debugging messages are not sufficient. * Check that ``qtot`` is correct. Because the function is based on pattern matching of the informative output of :program:`grompp` it can break when the output format changes. This version recognizes lines like :: ' System has non-zero total charge: -4.000001e+00' using the regular expression :regexp:`System has non-zero total charge: *(?P<qtot>[-+]?\d*\.\d+([eE][-+]\d+)?)`.
def parse_query_string(self, query): if not query: return None if query[0] == '(': index = self.find_closing_braces(query) if index != len(query) - 1: raise Exception("Invalid syntax") else: return self.parse_query_string(query[1:-1]) start_index = query.find("(") if start_index < 0: try: constant = float(query) return constant except ValueError: raise Exception("Invalid syntax") token = query[:start_index] if token not in self.operators: raise Exception("Invalid token: " + token) rest_of_the_query = query[start_index:] braces_end_index = self.find_closing_braces(rest_of_the_query) if braces_end_index != len(rest_of_the_query) - 1: raise Exception("Invalid syntax") parts = self.get_sub_parts(rest_of_the_query[1:-1]) if token == "TS": return self.operators[token](parts) children = [] for part in parts: children.append(self.parse_query_string(part)) node = self.operators[token](children) return node
Returns a parse tree for the query, each of the node is a subclass of Operator. This is both a lexical as well as syntax analyzer step.
def solve_prop(self, goal, reset_method=True):
    if self.Tmin is None or self.Tmax is None:
        raise Exception('Both a minimum and a maximum value are not present indicating there is not enough data for temperature dependency.')
    if not self.test_property_validity(goal):
        raise Exception('Input property is not considered plausible; no method would calculate it.')

    def error(T):
        if reset_method:
            self.method = None
        return self.T_dependent_property(T) - goal
    try:
        return brenth(error, self.Tmin, self.Tmax)
    except ValueError:
        raise Exception('To within the implemented temperature range, it is not possible to calculate the desired value.')
Method to solve for the temperature at which a property is at a specified value. `T_dependent_property` is used to calculate the value of the property as a function of temperature; if `reset_method` is True, the best method is used at each temperature as the solver seeks a solution. This slows the solution moderately. Checks the given property value with `test_property_validity` first and raises an exception if it is not valid. Requires that Tmin and Tmax have been set to know what range to search within. Search is performed with the brenth solver from SciPy. Parameters ---------- goal : float Property value desired, [`units`] reset_method : bool Whether or not to reset the method as the solver searches Returns ------- T : float Temperature at which the property is the specified value [K]
def _make_prefixed(self, name, is_element, declared_prefixes, declarations): namespace, name = self._split_qname(name, is_element) if namespace is None: prefix = None elif namespace in declared_prefixes: prefix = declared_prefixes[namespace] elif namespace in self._prefixes: prefix = self._prefixes[namespace] declarations[namespace] = prefix declared_prefixes[namespace] = prefix else: if is_element: prefix = None else: prefix = self._make_prefix(declared_prefixes) declarations[namespace] = prefix declared_prefixes[namespace] = prefix if prefix: return prefix + u":" + name else: return name
Return namespace-prefixed tag or attribute name. Add appropriate declaration to `declarations` when necessary. If no prefix for an element namespace is defined, make the element's namespace default (no prefix). For attributes, make up a prefix in such case. :Parameters: - `name`: QName ('{namespace-uri}local-name') to convert - `is_element`: `True` for element, `False` for an attribute - `declared_prefixes`: mapping of prefixes already declared at this scope - `declarations`: XMLNS declarations on the current element. :Types: - `name`: `unicode` - `is_element`: `bool` - `declared_prefixes`: `unicode` to `unicode` dictionary - `declarations`: `unicode` to `unicode` dictionary :Returntype: `unicode`
def processed_shape(self, shape):
    for processor in self.preprocessors:
        shape = processor.processed_shape(shape=shape)
    return shape
Shape of preprocessed state given original shape. Args: shape: original state shape Returns: processed state shape
def transaction(func): @wraps(func) def wrapper(cls, *args, **kwargs): with (yield from cls.get_cursor(_CursorType.NAMEDTUPLE)) as c: try: yield from c.execute('BEGIN') result = (yield from func(cls, c, *args, **kwargs)) except Exception: yield from c.execute('ROLLBACK') else: yield from c.execute('COMMIT') return result return wrapper
Provides a transacted cursor which will run in autocommit=false mode For any exception the transaction will be rolled back. Requires that the function being decorated is an instance of a class or object that yields a cursor from a get_cursor(cursor_type=CursorType.NAMEDTUPLE) coroutine or provides such an object as the first argument in its signature Yields: A client-side transacted named cursor
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
    if is_forward_declaration:
        return len(nesting_state.stack) >= 1 and (
            isinstance(nesting_state.stack[-1], _NamespaceInfo))
    return (len(nesting_state.stack) > 1 and
            nesting_state.stack[-1].check_namespace_indentation and
            isinstance(nesting_state.stack[-2], _NamespaceInfo))
Checks that the new block is directly in a namespace. Args: nesting_state: The _NestingState object that contains info about our state. is_forward_declaration: If the class is a forward declared class. Returns: Whether or not the new block is directly in a namespace.
def getAccountFromPrivateKey(self, wif):
    pub = self.publickey_from_wif(wif)
    return self.getAccountFromPublicKey(pub)
Obtain account name from private key
def _make_session(connection: Optional[str] = None) -> Session:
    if connection is None:
        connection = get_global_connection()
    engine = create_engine(connection)
    create_all(engine)
    session_cls = sessionmaker(bind=engine)
    session = session_cls()
    return session
Make a session.
def less_or_equal(a, b, *args):
    return (
        less(a, b) or soft_equals(a, b)
    ) and (not args or less_or_equal(b, *args))
Implements the '<=' operator with JS-style type coercion.
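A doctest-style sketch, assuming `less` and `soft_equals` are the module's JS-style comparison helpers (so numeric strings compare as numbers):
>>> less_or_equal(1, 1)
True
>>> less_or_equal(1, '2', 3)
True
>>> less_or_equal(3, 2)
False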
def find_external_compartment(model): if model.boundary: counts = pd.Series(tuple(r.compartments)[0] for r in model.boundary) most = counts.value_counts() most = most.index[most == most.max()].to_series() else: most = None like_external = compartment_shortlist["e"] + ["e"] matches = pd.Series([co in like_external for co in model.compartments], index=model.compartments) if matches.sum() == 1: compartment = matches.index[matches][0] LOGGER.info("Compartment `%s` sounds like an external compartment. " "Using this one without counting boundary reactions" % compartment) return compartment elif most is not None and matches.sum() > 1 and matches[most].sum() == 1: compartment = most[matches[most]][0] LOGGER.warning("There are several compartments that look like an " "external compartment but `%s` has the most boundary " "reactions, so using that as the external " "compartment." % compartment) return compartment elif matches.sum() > 1: raise RuntimeError("There are several compartments (%s) that look " "like external compartments but we can't tell " "which one to use. Consider renaming your " "compartments please.") if most is not None: return most[0] LOGGER.warning("Could not identify an external compartment by name and" " choosing one with the most boundary reactions. That " "might be complete nonsense or change suddenly. " "Consider renaming your compartments using " "`Model.compartments` to fix this.") raise RuntimeError("The heuristic for discovering an external compartment " "relies on names and boundary reactions. Yet, there " "are neither compartments with recognized names nor " "boundary reactions in the model.")
Find the external compartment in the model. Uses a simple heuristic where the external compartment should be the one with the most exchange reactions. Arguments --------- model : cobra.Model A cobra model. Returns ------- str The putative external compartment.
def _nginx_location_spec(port_spec, bridge_ip): location_string_spec = "\t \t location / { \n" for location_setting in ['proxy_http_version 1.1;', 'proxy_set_header Upgrade $http_upgrade;', 'proxy_set_header Connection "upgrade";', 'proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;', 'proxy_set_header Host $http_host;', _nginx_proxy_string(port_spec, bridge_ip)]: location_string_spec += "\t \t \t {} \n".format(location_setting) location_string_spec += "\t \t } \n" return location_string_spec
This will output the nginx location config string for a specific port spec.
def update_extend(dst, src):
    for k, v in src.items():
        existing = dst.setdefault(k, [])
        for x in v:
            if x not in existing:
                existing.append(x)
Update the `dst` with the `src`, extending values that are lists. Primarily useful for integrating results from `get_library_config`.
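A doctest-style sketch of the merge behaviour; the keys mimic a typical `get_library_config` result and are illustrative, and the dict repr assumes insertion order (Python 3.7+):
>>> dst = {'libraries': ['z'], 'define_macros': []}
>>> update_extend(dst, {'libraries': ['z', 'png'], 'include_dirs': ['/usr/include']})
>>> dst
{'libraries': ['z', 'png'], 'define_macros': [], 'include_dirs': ['/usr/include']}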
def _fixupRandomEncoderParams(params, minVal, maxVal, minResolution): encodersDict = ( params["modelConfig"]["modelParams"]["sensorParams"]["encoders"] ) for encoder in encodersDict.itervalues(): if encoder is not None: if encoder["type"] == "RandomDistributedScalarEncoder": resolution = max(minResolution, (maxVal - minVal) / encoder.pop("numBuckets") ) encodersDict["c1"]["resolution"] = resolution
Given model params, figure out the correct parameters for the RandomDistributed encoder. Modifies params in place.
def set_stream(self, stream):
    _unused = stream
    if self.joined and self.handler:
        self.handler.user_left(self.me, None)
    self.joined = False
Called when current stream changes. Mark the room not joined and inform `self.handler` that it was left. :Parameters: - `stream`: the new stream. :Types: - `stream`: `pyxmpp.stream.Stream`
def set_state(_id, body): url = DEVICE_URL % _id if "mode" in body: url = MODES_URL % _id arequest = requests.put(url, headers=HEADERS, data=json.dumps(body)) status_code = str(arequest.status_code) if status_code != '202': _LOGGER.error("State not accepted. " + status_code) return False
Set a devices state.
def upload_s3(cfg, path_to_zip_file, *use_s3): print('Uploading your new Lambda function') profile_name = cfg.get('profile') aws_access_key_id = cfg.get('aws_access_key_id') aws_secret_access_key = cfg.get('aws_secret_access_key') client = get_client( 's3', profile_name, aws_access_key_id, aws_secret_access_key, cfg.get('region'), ) byte_stream = b'' with open(path_to_zip_file, mode='rb') as fh: byte_stream = fh.read() s3_key_prefix = cfg.get('s3_key_prefix', '/dist') checksum = hashlib.new('md5', byte_stream).hexdigest() timestamp = str(time.time()) filename = '{prefix}{checksum}-{ts}.zip'.format( prefix=s3_key_prefix, checksum=checksum, ts=timestamp, ) buck_name = ( os.environ.get('S3_BUCKET_NAME') or cfg.get('bucket_name') ) func_name = ( os.environ.get('LAMBDA_FUNCTION_NAME') or cfg.get('function_name') ) kwargs = { 'Bucket': '{}'.format(buck_name), 'Key': '{}'.format(filename), 'Body': byte_stream, } client.put_object(**kwargs) print('Finished uploading {} to S3 bucket {}'.format(func_name, buck_name)) if use_s3: return filename
Upload a function to AWS S3.
def log_file(self, url=None): if url is None: url = self.url f = re.sub("file://", "", url) try: with open(f, "a") as of: of.write(str(self.store.get_json_tuples(True))) except IOError as e: print(e) print("Could not write the content to the file..")
Write to a local log file
def build_dag(data, samples): snames = [i.name for i in samples] dag = nx.DiGraph() joborder = JOBORDER[data.paramsdict["assembly_method"]] for sname in snames: for func in joborder: dag.add_node("{}-{}-{}".format(func, 0, sname)) for chunk in xrange(10): dag.add_node("{}-{}-{}".format("muscle_align", chunk, sname)) dag.add_node("{}-{}-{}".format("reconcat", 0, sname)) for sname in snames: for sname2 in snames: dag.add_edge("{}-{}-{}".format(joborder[0], 0, sname2), "{}-{}-{}".format(joborder[1], 0, sname)) for idx in xrange(2, len(joborder)): dag.add_edge("{}-{}-{}".format(joborder[idx-1], 0, sname), "{}-{}-{}".format(joborder[idx], 0, sname)) for sname2 in snames: for chunk in range(10): dag.add_edge("{}-{}-{}".format("muscle_chunker", 0, sname2), "{}-{}-{}".format("muscle_align", chunk, sname)) dag.add_edge("{}-{}-{}".format("muscle_align", chunk, sname), "{}-{}-{}".format("reconcat", 0, sname)) return dag, joborder
Build a directed acyclic graph describing the jobs to be run, in order.