def get_single_header(headers, key):
    raw_headers = headers.getRawHeaders(key)
    if raw_headers is None:
        return None
    header, _ = cgi.parse_header(raw_headers[-1])
    return header
Get a single value for the given key out of the given set of headers. :param twisted.web.http_headers.Headers headers: The set of headers in which to look for the header value :param str key: The header key
def update(self, *args, **kwargs):
    super(Deposit, self).update(*args, **kwargs)
Update only drafts. Status required: ``'draft'``. Meta information inside `_deposit` is preserved.
def aggregate_tree(l_tree): def _aggregate_phase1(tree): n_tree = radix.Radix() for prefix in tree.prefixes(): if tree.search_worst(prefix).prefix == prefix: n_tree.add(prefix) return n_tree def _aggregate_phase2(tree): n_tree = radix.Radix() for rnode in tree: p = text(ip_network(text(rnode.prefix)).supernet()) r = tree.search_covered(p) if len(r) == 2: if r[0].prefixlen == r[1].prefixlen == rnode.prefixlen: n_tree.add(p) else: n_tree.add(rnode.prefix) else: n_tree.add(rnode.prefix) return n_tree l_tree = _aggregate_phase1(l_tree) if len(l_tree.prefixes()) == 1: return l_tree while True: r_tree = _aggregate_phase2(l_tree) if l_tree.prefixes() == r_tree.prefixes(): break else: l_tree = r_tree del r_tree return l_tree
Walk a py-radix tree and aggregate it. Arguments l_tree -- radix.Radix() object
def switch(template, version):
    temple.update.update(new_template=template, new_version=version)
Switch a project's template to a different template.
def send_status_response(environ, start_response, e, add_headers=None, is_head=False): status = get_http_status_string(e) headers = [] if add_headers: headers.extend(add_headers) if e in (HTTP_NOT_MODIFIED, HTTP_NO_CONTENT): start_response( status, [("Content-Length", "0"), ("Date", get_rfc1123_time())] + headers ) return [b""] if e in (HTTP_OK, HTTP_CREATED): e = DAVError(e) assert isinstance(e, DAVError) content_type, body = e.get_response_page() if is_head: body = compat.b_empty assert compat.is_bytes(body), body start_response( status, [ ("Content-Type", content_type), ("Date", get_rfc1123_time()), ("Content-Length", str(len(body))), ] + headers, ) return [body]
Start a WSGI response for a DAVError or status code.
def removeTopology(self, topology_name, state_manager_name): topologies = [] for top in self.topologies: if (top.name == topology_name and top.state_manager_name == state_manager_name): if (topology_name, state_manager_name) in self.topologyInfos: self.topologyInfos.pop((topology_name, state_manager_name)) else: topologies.append(top) self.topologies = topologies
Removes the topology from the local cache.
def encoder_data(self, data): prev_val = self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] val = int((data[self.MSB] << 7) + data[self.LSB]) if val > 8192: val -= 16384 pin = data[0] with self.pymata.data_lock: self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = val if prev_val != val: callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK] if callback is not None: callback([self.pymata.ENCODER, pin, self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]])
This method handles the incoming encoder data message and stores the data in the digital response table. :param data: Message data from Firmata :return: No return value.
def _dot_to_dec(ip, check=True):
    if check and not is_dot(ip):
        raise ValueError('_dot_to_dec: invalid IP: "%s"' % ip)
    octets = str(ip).split('.')
    dec = 0
    dec |= int(octets[0]) << 24
    dec |= int(octets[1]) << 16
    dec |= int(octets[2]) << 8
    dec |= int(octets[3])
    return dec
Dotted decimal notation to decimal conversion.
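As a quick illustration of the bit-shifting above (hypothetical call; it assumes `is_dot` accepts the address):

>>> _dot_to_dec('192.168.0.1')
3232235521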
def initialize(self, containers):
    self._containers = deepcopy(containers)
    self.__write(containers, initialize=True)
Initialize a new state file with the given contents. This function fails in case the state file already exists.
def is_quiet(self):
    level = self._conversation.self_conversation_state.notification_level
    return level == hangouts_pb2.NOTIFICATION_LEVEL_QUIET
``True`` if notification level for this conversation is quiet.
def create_dashboard(self, panel_file, data_sources=None, strict=True): es_enrich = self.conf['es_enrichment']['url'] kibana_url = self.conf['panels']['kibiter_url'] mboxes_sources = set(['pipermail', 'hyperkitty', 'groupsio', 'nntp']) if data_sources and any(x in data_sources for x in mboxes_sources): data_sources = list(data_sources) data_sources.append('mbox') if data_sources and ('supybot' in data_sources): data_sources = list(data_sources) data_sources.append('irc') if data_sources and 'google_hits' in data_sources: data_sources = list(data_sources) data_sources.append('googlehits') if data_sources and 'stackexchange' in data_sources: data_sources = list(data_sources) data_sources.append('stackoverflow') if data_sources and 'phabricator' in data_sources: data_sources = list(data_sources) data_sources.append('maniphest') try: import_dashboard(es_enrich, kibana_url, panel_file, data_sources=data_sources, strict=strict) except ValueError: logger.error("%s does not include release field. Not loading the panel.", panel_file) except RuntimeError: logger.error("Can not load the panel %s", panel_file)
Upload a panel to Elasticsearch if it does not exist yet. If a list of data sources is specified, upload only those elements (visualizations, searches) that match that data source. :param panel_file: file name of panel (dashboard) to upload :param data_sources: list of data sources :param strict: only upload a dashboard if it is newer than the one already existing
def get_constructor_arguments(self) -> str:
    item = self._constructor_abi_item
    return '()' if item is None else self.tuple_signature_for_components(item['inputs'])
Returns the tuple type signature for the arguments of the contract constructor.
def _get_operation_input_field_values(self, metadata, file_input): input_args = metadata['request']['ephemeralPipeline']['inputParameters'] vals_dict = metadata['request']['pipelineArgs']['inputs'] names = [ arg['name'] for arg in input_args if ('localCopy' in arg) == file_input ] return {name: vals_dict[name] for name in names if name in vals_dict}
Returns a dictionary of envs or file inputs for an operation. Args: metadata: operation metadata field file_input: True to return a dict of file inputs, False to return envs. Returns: A dictionary of input field name value pairs
def view_maker(self, name, instance=None): if instance is None: instance = self sig = "lang" in [ parameter.name for parameter in inspect.signature(getattr(instance, name)).parameters.values() ] def route(**kwargs): if sig and "lang" not in kwargs: kwargs["lang"] = self.get_locale() if "semantic" in kwargs: del kwargs["semantic"] return self.route(getattr(instance, name), **kwargs) return route
Create a view :param name: Name of the route function to use for the view. :type name: str :return: Route function which makes use of Nemo context (such as menu information) :rtype: function
def _get_container_port_mappings(app):
    container = app['container']
    port_mappings = container.get('portMappings')
    if port_mappings is None and 'docker' in container:
        port_mappings = container['docker'].get('portMappings')
    return port_mappings
Get the ``portMappings`` field for the app container.
def clear(self):
    self._compound_masses = self._compound_masses * 0.0
    self._P = 1.0
    self._T = 25.0
    self._H = 0.0
Set all the compound masses in the package to zero. Set the pressure to 1, the temperature to 25 and the enthalpy to zero.
def MessageToJson(message, including_default_value_fields=False):
    js = _MessageToJsonObject(message, including_default_value_fields)
    return json.dumps(js, indent=2)
Converts protobuf message to JSON format. Args: message: The protocol buffers message instance to serialize. including_default_value_fields: If True, singular primitive fields, repeated fields, and map fields will always be serialized. If False, only serialize non-empty fields. Singular message fields and oneof fields are not affected by this option. Returns: A string containing the JSON formatted protocol buffer message.
def _keep_alive_thread(self): while True: with self._lock: if self.connected(): self._ws.ping() else: self.disconnect() self._thread = None return sleep(30)
Used exclusively as a thread which keeps the WebSocket alive.
def scaffolds_to_contigs(infile, outfile, number_contigs=False): seq_reader = sequences.file_reader(infile) fout = utils.open_file_write(outfile) for seq in seq_reader: contigs = seq.contig_coords() counter = 1 for contig in contigs: if number_contigs: name = seq.id + '.' + str(counter) counter += 1 else: name = '.'.join([seq.id, str(contig.start + 1), str(contig.end + 1)]) print(sequences.Fasta(name, seq[contig.start:contig.end+1]), file=fout) utils.close(fout)
Makes a file of contigs from scaffolds by splitting at every N. Use number_contigs=True to add .1, .2, etc. onto the end of each contig name, instead of the default of appending coordinates.
def parseStringList(s):
    assert isinstance(s, basestring)
    return [int(i) for i in s.split()]
Parse a string of space-separated numbers, returning a Python list. :param s: (string) to parse :returns: (list) binary SDR
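A minimal doctest-style illustration (note the snippet assumes a Python 2 runtime, since it checks against `basestring`):

>>> parseStringList('1 0 1 1')
[1, 0, 1, 1]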
def _camelcase_to_underscore(url): def upper2underscore(text): for char in text: if char.islower(): yield char else: yield '_' if char.isalpha(): yield char.lower() return ''.join(upper2underscore(url))
Translate camelCase into underscore format. >>> _camelcase_to_underscore('minutesBetweenSummaries') 'minutes_between_summaries'
def add_loopless(model, zero_cutoff=None): zero_cutoff = normalize_cutoff(model, zero_cutoff) internal = [i for i, r in enumerate(model.reactions) if not r.boundary] s_int = create_stoichiometric_matrix(model)[:, numpy.array(internal)] n_int = nullspace(s_int).T max_bound = max(max(abs(b) for b in r.bounds) for r in model.reactions) prob = model.problem to_add = [] for i in internal: rxn = model.reactions[i] indicator = prob.Variable("indicator_" + rxn.id, type="binary") on_off_constraint = prob.Constraint( rxn.flux_expression - max_bound * indicator, lb=-max_bound, ub=0, name="on_off_" + rxn.id) delta_g = prob.Variable("delta_g_" + rxn.id) delta_g_range = prob.Constraint( delta_g + (max_bound + 1) * indicator, lb=1, ub=max_bound, name="delta_g_range_" + rxn.id) to_add.extend([indicator, on_off_constraint, delta_g, delta_g_range]) model.add_cons_vars(to_add) for i, row in enumerate(n_int): name = "nullspace_constraint_" + str(i) nullspace_constraint = prob.Constraint(Zero, lb=0, ub=0, name=name) model.add_cons_vars([nullspace_constraint]) coefs = {model.variables[ "delta_g_" + model.reactions[ridx].id]: row[i] for i, ridx in enumerate(internal) if abs(row[i]) > zero_cutoff} model.constraints[name].set_linear_coefficients(coefs)
Modify a model so all feasible flux distributions are loopless. In most cases you probably want to use the much faster `loopless_solution`. May be used in cases where you want to add complex constraints and objectives (for instance quadratic objectives) to the model afterwards or use an approximation of Gibbs free energy directions in your model. Adds variables and constraints to a model which will disallow flux distributions with loops. The formulation used is described in [1]_. This function *will* modify your model. Parameters ---------- model : cobra.Model The model to which to add the constraints. zero_cutoff : positive float, optional Cutoff used for null space. Coefficients with an absolute value smaller than `zero_cutoff` are considered to be zero (default model.tolerance). Returns ------- Nothing References ---------- .. [1] Elimination of thermodynamically infeasible loops in steady-state metabolic models. Schellenberger J, Lewis NE, Palsson BO. Biophys J. 2011 Feb 2;100(3):544-53. doi: 10.1016/j.bpj.2010.12.3707. Erratum in: Biophys J. 2011 Mar 2;100(5):1381.
def many_to_one(clsname, **kw):
    @declared_attr
    def m2o(cls):
        cls._references((cls.__name__, clsname))
        return relationship(clsname, **kw)
    return m2o
Use an event to build a many-to-one relationship on a class. This makes use of the :meth:`.References._reference_table` method to generate a full foreign key relationship to the remote table.
def SyntheticRestaurant(n=20): "Generate a DataSet with n examples." def gen(): example = map(random.choice, restaurant.values) example[restaurant.target] = Fig[18,2](example) return example return RestaurantDataSet([gen() for i in range(n)])
Generate a DataSet with n examples.
def merge_to_one_seq(infile, outfile, seqname='union'): seq_reader = sequences.file_reader(infile) seqs = [] for seq in seq_reader: seqs.append(copy.copy(seq)) new_seq = ''.join([seq.seq for seq in seqs]) if type(seqs[0]) == sequences.Fastq: new_qual = ''.join([seq.qual for seq in seqs]) seqs[:] = [] merged = sequences.Fastq(seqname, new_seq, new_qual) else: merged = sequences.Fasta(seqname, new_seq) seqs[:] = [] f = utils.open_file_write(outfile) print(merged, file=f) utils.close(f)
Takes a multi-FASTA or FASTQ file and writes a new file that contains just one sequence, with the original sequences concatenated together, preserving their order.
def decorator(decorator_func):
    assert callable(decorator_func), type(decorator_func)

    def _decorator(func=None, **kwargs):
        assert func is None or callable(func), type(func)
        if func:
            return decorator_func(func, **kwargs)
        else:
            def _decorator_helper(func):
                return decorator_func(func, **kwargs)
            return _decorator_helper

    return _decorator
Allows a decorator to be called with or without keyword arguments.
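A minimal usage sketch; `verbose` and `label` are hypothetical names introduced only to show the two call styles this helper enables:

@decorator
def verbose(func, label='call'):
    def wrapper(*args, **kwargs):
        print(label, func.__name__)  # illustrative side effect only
        return func(*args, **kwargs)
    return wrapper

@verbose                  # used without keyword arguments
def foo():
    pass

@verbose(label='trace')   # used with keyword arguments
def bar():
    pass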
def _matrix2dict(matrix, etype=False):
    n = len(matrix)
    adj = {k: {} for k in range(n)}
    for k in range(n):
        for j in range(n):
            if matrix[k, j] != 0:
                adj[k][j] = {} if not etype else matrix[k, j]
    return adj
Takes an adjacency matrix and returns an adjacency list.
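For example, with a small NumPy array (the function assumes `matrix[k, j]` indexing, so a NumPy array or similar is expected):

>>> import numpy as np
>>> _matrix2dict(np.array([[0, 1], [0, 0]]))
{0: {1: {}}, 1: {}}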
def get_or_default(func=None, default=None):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except ObjectDoesNotExist:
                if callable(default):
                    return default()
                else:
                    return default
        return wrapper
    if func is None:
        return decorator
    else:
        return decorator(func)
Wrapper around Django's ORM `get` functionality. Wrap anything that raises an ObjectDoesNotExist exception and provide the default value if necessary. `default` is None by default. `default` can be any callable; if it is callable, it will be called when an ObjectDoesNotExist exception is raised.
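A usage sketch (``Profile`` is a hypothetical Django model used only for illustration):

@get_or_default(default=None)
def find_profile(user_id):
    return Profile.objects.get(pk=user_id)  # Profile is hypothetical

profile = find_profile(42)  # returns None instead of raising ObjectDoesNotExist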
def _expand_consumed_mesh(self, mesh, mesh_index, row_position, passed): if not mesh.is_produced(): return row = mesh.producing_row position = Point( row_position.x + mesh.index_in_producing_row - mesh_index, row_position.y - INSTRUCTION_HEIGHT ) self._expand(row, position, passed)
Expand the consumed meshes.
def register_timer_task_in_sec(self, task, second):
    second_in_float = float(second)
    expiration = time.time() + second_in_float
    heappush(self.timer_tasks, (expiration, task))
Registers a new timer task :param task: function to be run at a specified second from now :param second: how many seconds to wait before the timer is triggered
def fmt(a, b):
    return 100 * np.min([a, b], axis=0).sum() / np.max([a, b], axis=0).sum()
Figure of merit in time
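A small worked example of the formula above (values chosen purely for illustration):

fmt([1, 2, 3], [2, 2, 2])  # 100 * (1 + 2 + 2) / (2 + 2 + 3) ≈ 71.43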
def submit(course, tid=None, pastebin=False, review=False):
    if tid is not None:
        return submit_exercise(Exercise.byid(tid), pastebin=pastebin, request_review=review)
    else:
        sel = Exercise.get_selected()
        if not sel:
            raise NoExerciseSelected()
        return submit_exercise(sel, pastebin=pastebin, request_review=review)
Submit the selected exercise to the server.
def _get_warped_array( input_file=None, indexes=None, dst_bounds=None, dst_shape=None, dst_crs=None, resampling=None, src_nodata=None, dst_nodata=None ): try: return _rasterio_read( input_file=input_file, indexes=indexes, dst_bounds=dst_bounds, dst_shape=dst_shape, dst_crs=dst_crs, resampling=resampling, src_nodata=src_nodata, dst_nodata=dst_nodata ) except Exception as e: logger.exception("error while reading file %s: %s", input_file, e) raise
Extract a numpy array from a raster file.
def fromordinal(cls, n):
    y, m, d = _ord2ymd(n)
    return cls(y, m, d)
Construct a date from a proleptic Gregorian ordinal. January 1 of year 1 is day 1. Only the year, month and day are non-zero in the result.
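For instance (assuming the enclosing class is a `date`-like type exposing this classmethod):

date.fromordinal(1)    # January 1 of year 1, as stated above
date.fromordinal(365)  # December 31 of year 1 (year 1 has 365 days)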
def get_own_ip(): own_ip = None interfaces = psutil.net_if_addrs() for _, details in interfaces.items(): for detail in details: if detail.family == socket.AF_INET: ip_address = ipaddress.ip_address(detail.address) if not (ip_address.is_link_local or ip_address.is_loopback): own_ip = str(ip_address) break return own_ip
Gets the IP from the inet interfaces.
def dist_abs( self, src, tar, weights='exponential', max_length=8, normalized=False ): xored = eudex(src, max_length=max_length) ^ eudex( tar, max_length=max_length ) if not weights: binary = bin(xored) distance = binary.count('1') if normalized: return distance / (len(binary) - 2) return distance if callable(weights): weights = weights() elif weights == 'exponential': weights = Eudex.gen_exponential() elif weights == 'fibonacci': weights = Eudex.gen_fibonacci() if isinstance(weights, GeneratorType): weights = [next(weights) for _ in range(max_length)][::-1] distance = 0 max_distance = 0 while (xored or normalized) and weights: max_distance += 8 * weights[-1] distance += bin(xored & 0xFF).count('1') * weights.pop() xored >>= 8 if normalized: distance /= max_distance return distance
Calculate the distance between the Eudex hashes of two terms. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison weights : str, iterable, or generator function The weights or weights generator function - If set to ``None``, a simple Hamming distance is calculated. - If set to ``exponential``, weight decays by powers of 2, as proposed in the eudex specification: https://github.com/ticki/eudex. - If set to ``fibonacci``, weight decays through the Fibonacci series, as in the eudex reference implementation. - If set to a callable function, this assumes it creates a generator and the generator is used to populate a series of weights. - If set to an iterable, the iterable's values should be integers and will be used as the weights. max_length : int The number of characters to encode as a eudex hash normalized : bool Normalizes to [0, 1] if True Returns ------- int The Eudex Hamming distance Examples -------- >>> cmp = Eudex() >>> cmp.dist_abs('cat', 'hat') 128 >>> cmp.dist_abs('Niall', 'Neil') 2 >>> cmp.dist_abs('Colin', 'Cuilen') 10 >>> cmp.dist_abs('ATCG', 'TAGC') 403 >>> cmp.dist_abs('cat', 'hat', weights='fibonacci') 34 >>> cmp.dist_abs('Niall', 'Neil', weights='fibonacci') 2 >>> cmp.dist_abs('Colin', 'Cuilen', weights='fibonacci') 7 >>> cmp.dist_abs('ATCG', 'TAGC', weights='fibonacci') 117 >>> cmp.dist_abs('cat', 'hat', weights=None) 1 >>> cmp.dist_abs('Niall', 'Neil', weights=None) 1 >>> cmp.dist_abs('Colin', 'Cuilen', weights=None) 2 >>> cmp.dist_abs('ATCG', 'TAGC', weights=None) 9 >>> # Using the OEIS A000142: >>> cmp.dist_abs('cat', 'hat', [1, 1, 2, 6, 24, 120, 720, 5040]) 1 >>> cmp.dist_abs('Niall', 'Neil', [1, 1, 2, 6, 24, 120, 720, 5040]) 720 >>> cmp.dist_abs('Colin', 'Cuilen', ... [1, 1, 2, 6, 24, 120, 720, 5040]) 744 >>> cmp.dist_abs('ATCG', 'TAGC', [1, 1, 2, 6, 24, 120, 720, 5040]) 6243
def tradepile(self): method = 'GET' url = 'tradepile' rc = self.__request__(method, url) events = [self.pin.event('page_view', 'Hub - Transfers'), self.pin.event('page_view', 'Transfer List - List View')] if rc.get('auctionInfo'): events.append(self.pin.event('page_view', 'Item - Detail View')) self.pin.send(events) return [itemParse(i) for i in rc.get('auctionInfo', ())]
Return items in tradepile.
def get_all_certificates(self):
    data = self.get_data("certificates")
    certificates = list()
    for jsoned in data['certificates']:
        cert = Certificate(**jsoned)
        cert.token = self.token
        certificates.append(cert)
    return certificates
This function returns a list of Certificate objects.
def create_jwt(integration_id, private_key_path): integration_id = int(integration_id) with open(private_key_path, 'rb') as f: cert_bytes = f.read() now = datetime.datetime.now() expiration_time = now + datetime.timedelta(minutes=9) payload = { 'iat': int(now.timestamp()), 'exp': int(expiration_time.timestamp()), 'iss': integration_id } return jwt.encode(payload, cert_bytes, algorithm='RS256')
Create a JSON Web Token to authenticate a GitHub Integration or installation. Parameters ---------- integration_id : `int` Integration ID. This is available from the GitHub integration's homepage. private_key_path : `str` Path to the integration's private key (a ``.pem`` file). Returns ------- jwt : `bytes` JSON Web Token that is good for 9 minutes. Notes ----- The JWT is encoded with the RS256 algorithm. It includes a payload with fields: - ``'iat'``: The current time, as an `int` timestamp. - ``'exp'``: Expiration time, as an `int` timestamp. The expiration time is set to 9 minutes in the future (maximum allowance is 10 minutes). - ``'iss'``: The integration ID (`int`). For more information, see https://developer.github.com/early-access/integrations/authentication/.
def allow_request(self, request, view):
    service_users = get_service_usernames()
    if request.user.username in service_users:
        self.update_throttle_scope()
    return super(ServiceUserThrottle, self).allow_request(request, view)
Modify throttling for service users. Updates throttling rate if the request is coming from the service user, and defaults to UserRateThrottle's configured setting otherwise. Updated throttling rate comes from `DEFAULT_THROTTLE_RATES` key in `REST_FRAMEWORK` setting. service user throttling is specified in `DEFAULT_THROTTLE_RATES` by `service_user` key Example Setting: ``` REST_FRAMEWORK = { ... 'DEFAULT_THROTTLE_RATES': { ... 'service_user': '50/day' } } ```
def map(self, map_function):
    from heronpy.streamlet.impl.mapbolt import MapStreamlet
    map_streamlet = MapStreamlet(map_function, self)
    self._add_child(map_streamlet)
    return map_streamlet
Return a new Streamlet by applying map_function to each element of this Streamlet.
def connect(self):
    try:
        logger.info(u'Connecting %s:%d' % (self.host, self.port))
        self.sock.connect((self.host, self.port))
    except socket.error:
        raise ConnectionError()
    self.state = CONNECTED
Connect to the server :raise ConnectionError: If socket cannot establish a connection
def _get_algorithm_info(self, algorithm_info):
    if algorithm_info['algorithm'] not in self.ALGORITHMS:
        raise Exception('Algorithm not supported: %s' % algorithm_info['algorithm'])
    algorithm = self.ALGORITHMS[algorithm_info['algorithm']]
    algorithm_info.update(algorithm)
    return algorithm_info
Get algorithm info
def path_to_node(tree, path):
    if path is None:
        return None
    node = tree
    for key in path:
        node = child_by_key(node, key)
    return node
Return the FST node located at the given path.
def INC(cpu, dest): arg0 = dest.read() res = dest.write(arg0 + 1) res &= (1 << dest.size) - 1 SIGN_MASK = 1 << (dest.size - 1) cpu.AF = ((arg0 ^ 1) ^ res) & 0x10 != 0 cpu.ZF = res == 0 cpu.SF = (res & SIGN_MASK) != 0 cpu.OF = res == SIGN_MASK cpu.PF = cpu._calculate_parity_flag(res)
Increments by 1. Adds 1 to the destination operand, while preserving the state of the CF flag. The destination operand can be a register or a memory location. This instruction allows a loop counter to be updated without disturbing the CF flag. (Use an ADD instruction with an immediate operand of 1 to perform an increment operation that does update the CF flag.):: DEST = DEST +1; :param cpu: current CPU. :param dest: destination operand.
def i2c_stop_reading(self, address):
    data = [address, self.I2C_STOP_READING]
    self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data)
This method stops an I2C_READ_CONTINUOUSLY operation for the i2c device address specified. :param address: address of i2c device
def get_last_commit(git_path=None):
    if git_path is None:
        git_path = GIT_PATH
    line = get_last_commit_line(git_path)
    revision_id = line.split()[1]
    return revision_id
Get the HEAD commit SHA1 of repository in current dir.
def file_serializer(obj):
    return {
        "id": str(obj.file_id),
        "filename": obj.key,
        "filesize": obj.file.size,
        "checksum": obj.file.checksum,
    }
Serialize an object. :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance. :returns: A dictionary with the fields to serialize.
def _keywords(self): meta = self.find("meta", {"name":"keywords"}) if isinstance(meta, dict) and \ meta.has_key("content"): keywords = [k.strip() for k in meta["content"].split(",")] else: keywords = [] return keywords
Returns the meta keywords in the page.
def update_throttle_scope(self):
    self.scope = SERVICE_USER_SCOPE
    self.rate = self.get_rate()
    self.num_requests, self.duration = self.parse_rate(self.rate)
Update throttle scope so that service user throttle rates are applied.
def predict_proba(self, X):
    check_is_fitted(self, ['inverse_influence_matrix'])
    X = check_array(X)
    return self.__find_leverages(X, self.inverse_influence_matrix)
Predict the distances for X to center of the training set. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- leverages: array of shape = [n_samples] The objects distances to center of the training set.
def getch():
    try:
        termios.tcsetattr(_fd, termios.TCSANOW, _new_settings)
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(_fd, termios.TCSADRAIN, _old_settings)
    return ch
Get a single character from stdin, blocking until a key is pressed.
def _in_git_repo():
    ret = temple.utils.shell('git rev-parse', stderr=subprocess.DEVNULL, check=False)
    return ret.returncode == 0
Returns True if inside a git repo, False otherwise
def smooth_fwhm(self, fwhm):
    if fwhm != self._smooth_fwhm:
        self._is_data_smooth = False
    self._smooth_fwhm = fwhm
Set a smoothing Gaussian kernel given its FWHM in mm.
def record_delete_subfield(rec, tag, subfield_code, ind1=' ', ind2=' '): ind1, ind2 = _wash_indicators(ind1, ind2) for field in rec.get(tag, []): if field[1] == ind1 and field[2] == ind2: field[0][:] = [subfield for subfield in field[0] if subfield_code != subfield[0]]
Delete all subfields with subfield_code in the record.
def sendCommands(comPort, commands): mutex.acquire() try: try: port = serial.Serial(port=comPort) header = '11010101 10101010' footer = '10101101' for command in _translateCommands(commands): _sendBinaryData(port, header + command + footer) except serial.SerialException: print('Unable to open serial port %s' % comPort) print('') raise finally: mutex.release()
Send X10 commands using the FireCracker on comPort comPort should be the name of a serial port on the host platform. On Windows, for example, 'com1'. commands should be a string consisting of X10 commands separated by commas. For example. 'A1 On, A Dim, A Dim, A Dim, A Lamps Off'. The letter is a house code (A-P) and the number is the device number (1-16). Possible commands for a house code / device number combination are 'On' and 'Off'. The commands 'Bright' and 'Dim' should be used with a house code alone after sending an On command to a specific device. The 'All On', 'All Off', 'Lamps On', and 'Lamps Off' commands should also be used with a house code alone. # Turn on module A1 >>> sendCommands('com1', 'A1 On') # Turn all modules with house code A off >>> sendCommands('com1', 'A All Off') # Turn all lamp modules with house code B on >>> sendCommands('com1', 'B Lamps On') # Turn on module A1 and dim it 3 steps, then brighten it 1 step >>> sendCommands('com1', 'A1 On, A Dim, A Dim, A Dim, A Bright')
def save_traceback(err): dirname = safe_path(os.path.expanduser( os.path.join('~', '.{0}'.format(__script__)) )) if not os.path.isdir(dirname): os.mkdir(dirname) filename = os.path.join(dirname, '{0}.log'.format(__script__)) with open(filename, 'a+') as handler: traceback.print_exc(file=handler) message = ('User aborted workflow' if isinstance(err, KeyboardInterrupt) else 'Unexpected error catched') print_error(message) print_error('Full log stored to {0}'.format(filename), False) return True
Save error traceback to bootstrapper log file. :param err: Caught exception.
def configure_modrpaf(self): r = self.local_renderer if r.env.modrpaf_enabled: self.install_packages() self.enable_mod('rpaf') else: if self.last_manifest.modrpaf_enabled: self.disable_mod('mod_rpaf')
Installs the mod-rpaf Apache module. https://github.com/gnif/mod_rpaf
def _plugin_endpoint_rename(fn_name, instance):
    if instance and instance.namespaced:
        fn_name = "r_{0}_{1}".format(instance.name, fn_name[2:])
    return fn_name
Rename endpoint function name to avoid conflict when namespacing is set to true :param fn_name: Name of the route function :param instance: Instance bound to the function :return: Name of the new namespaced function name
def get_directory(self, path_to_directory, timeout=30, backoff=0.4, max_wait=4): response = None started_at = None time_elapsed = 0 i = 0 while time_elapsed < timeout: response = self._get('{0}.zip'.format(path_to_directory)) if response: break else: if started_at is None: started_at = time.time() time.sleep(min(backoff * (2 ** i), max_wait)) i += 1 time_elapsed = time.time() - started_at return response
Gets an artifact directory by its path. See the `Go artifact directory documentation`__ for example responses. .. __: http://api.go.cd/current/#get-artifact-directory .. note:: Getting a directory relies on Go creating a zip file of the directory in question. Because of this Go will zip the file in the background and return a 202 Accepted response. It's then up to the client to check again later and get the final file. To work with normal assumptions this :meth:`get_directory` will retry itself up to ``timeout`` seconds to get a 200 response to return. At that point it will then return the response as is, no matter whether it's still 202 or 200. The retry is done with an exponential backoff with a max value between retries. See the ``backoff`` and ``max_wait`` variables. If you want to handle the retry logic yourself then use :meth:`get` and add '.zip' as a suffix on the directory. Args: path_to_directory (str): The path to the directory to get. It can be nested eg ``target/dist.zip`` timeout (int): How many seconds we will wait in total for a successful response from Go when we're receiving 202 backoff (float): The initial value used for backoff, raises exponentially until it reaches ``max_wait`` max_wait (int): The max time between retries Returns: Response: :class:`gocd.api.response.Response` object A successful response is a zip-file.
def update(dst, src):
    stack = [(dst, src)]

    def isdict(o):
        return hasattr(o, 'keys')

    while stack:
        current_dst, current_src = stack.pop()
        for key in current_src:
            if key not in current_dst:
                current_dst[key] = current_src[key]
            else:
                if isdict(current_src[key]) and isdict(current_dst[key]):
                    stack.append((current_dst[key], current_src[key]))
                else:
                    current_dst[key] = current_src[key]
    return dst
Recursively update the destination dict-like object with the source dict-like object. Useful for merging options and Bunches together! Based on: http://code.activestate.com/recipes/499335-recursively-update-a-dictionary-without-hitting-py/#c1
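A minimal example of the nested merge described above:

>>> update({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}})
{'a': {'x': 1, 'y': 3}, 'b': 2}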
def remove(name_or_path): click.echo() try: r = cpenv.resolve(name_or_path) except cpenv.ResolveError as e: click.echo(e) return obj = r.resolved[0] if not isinstance(obj, cpenv.VirtualEnvironment): click.echo('{} is a module. Use `cpenv module remove` instead.') return click.echo(format_objects([obj])) click.echo() user_confirmed = click.confirm( red('Are you sure you want to remove this environment?') ) if user_confirmed: click.echo('Attempting to remove...', nl=False) try: obj.remove() except Exception as e: click.echo(bold_red('FAIL')) click.echo(e) else: click.echo(bold_green('OK!'))
Remove an environment
def retrieve_descriptor(descriptor): the_descriptor = descriptor if the_descriptor is None: the_descriptor = {} if isinstance(the_descriptor, six.string_types): try: if os.path.isfile(the_descriptor): with open(the_descriptor, 'r') as f: the_descriptor = json.load(f) else: req = requests.get(the_descriptor) req.raise_for_status() req.encoding = 'utf8' the_descriptor = req.json() except (IOError, requests.exceptions.RequestException) as error: message = 'Unable to load JSON at "%s"' % descriptor six.raise_from(exceptions.DataPackageException(message), error) except ValueError as error: message = 'Unable to parse JSON at "%s". %s' % (descriptor, error) six.raise_from(exceptions.DataPackageException(message), error) if hasattr(the_descriptor, 'read'): try: the_descriptor = json.load(the_descriptor) except ValueError as e: six.raise_from(exceptions.DataPackageException(str(e)), e) if not isinstance(the_descriptor, dict): msg = 'Data must be a \'dict\', but was a \'{0}\'' raise exceptions.DataPackageException(msg.format(type(the_descriptor).__name__)) return the_descriptor
Retrieve descriptor.
def decode(geohash):
    lat, lon, lat_err, lon_err = decode_exactly(geohash)
    lats = "%.*f" % (max(1, int(round(-log10(lat_err)))) - 1, lat)
    lons = "%.*f" % (max(1, int(round(-log10(lon_err)))) - 1, lon)
    if '.' in lats:
        lats = lats.rstrip('0')
    if '.' in lons:
        lons = lons.rstrip('0')
    return lats, lons
Decode geohash, returning two strings with latitude and longitude containing only relevant digits and with trailing zeroes removed.
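For instance, with the well-known test vector from the geohash literature (assuming `decode_exactly` implements the standard algorithm):

>>> decode('ezs42')
('42.6', '-5.6')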
def validate(self, strict=True): valid = True try: jsonschema.validate(self.__json__, self.__schema__) except jsonschema.ValidationError as invalid: if strict: raise SchemaError(str(invalid)) else: warnings.warn(str(invalid)) valid = False return valid
Validate a JObject against its schema Parameters ---------- strict : bool Enforce strict schema validation Returns ------- valid : bool True if the jam validates False if not, and `strict==False` Raises ------ SchemaError If `strict==True` and `jam` fails validation
def add_tag(self, tag):
    self.tags = list(set(self.tags or []) | set([tag]))
Adds a tag to the list of tags and makes sure the result list contains only unique results.
def list_(args): osf = _setup_osf(args) project = osf.project(args.project) for store in project.storages: prefix = store.name for file_ in store.files: path = file_.path if path.startswith('/'): path = path[1:] print(os.path.join(prefix, path))
List all files from all storages for project. If the project is private you need to specify a username.
def filter_config(config, deploy_config):
    if not os.path.isfile(deploy_config):
        return DotDict()
    config_module = get_config_module(deploy_config)
    return config_module.filter(config)
Return a config subset using the filter defined in the deploy config.
def create_session(self): session = None if self.key_file is not None: credfile = os.path.expandvars(os.path.expanduser(self.key_file)) try: with open(credfile, 'r') as f: creds = json.load(f) except json.JSONDecodeError as e: logger.error( "EC2Provider '{}': json decode error in credential file {}".format(self.label, credfile) ) raise e except Exception as e: logger.debug( "EC2Provider '{0}' caught exception while reading credential file: {1}".format( self.label, credfile ) ) raise e logger.debug("EC2Provider '{}': Using credential file to create session".format(self.label)) session = boto3.session.Session(region_name=self.region, **creds) elif self.profile is not None: logger.debug("EC2Provider '{}': Using profile name to create session".format(self.label)) session = boto3.session.Session( profile_name=self.profile, region_name=self.region ) else: logger.debug("EC2Provider '{}': Using environment variables to create session".format(self.label)) session = boto3.session.Session(region_name=self.region) return session
Create a session. First we look in self.key_file for a path to a json file with the credentials. The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'. Next we look at self.profile for a profile name and try to use the Session call to automatically pick up the keys for the profile from the user default keys file ~/.aws/config. Finally, boto3 will look for the keys in environment variables: AWS_ACCESS_KEY_ID: The access key for your AWS account. AWS_SECRET_ACCESS_KEY: The secret key for your AWS account. AWS_SESSION_TOKEN: The session key for your AWS account. This is only needed when you are using temporary credentials. The AWS_SECURITY_TOKEN environment variable can also be used, but is only supported for backwards compatibility purposes. AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.
async def dispatch_downstream(self, message, steam_name): handler = getattr(self, get_handler_name(message), None) if handler: await handler(message, stream_name=steam_name) else: await self.base_send(message)
Handle a downstream message coming from an upstream stream. If there is no handler method set for this message type, it will propagate the message further downstream. This is called as part of the co-routine of an upstream stream, not the same loop as used for upstream messages in the de-multiplexer.
def clicky(parser, token):
    bits = token.split_contents()
    if len(bits) > 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
    return ClickyNode()
Clicky tracking template tag. Renders Javascript code to track page visits. You must supply your Clicky Site ID (as a string) in the ``CLICKY_SITE_ID`` setting.
def call_dcm2nii(work_dir, arguments=''): if not op.exists(work_dir): raise IOError('Folder {} not found.'.format(work_dir)) cmd_line = 'dcm2nii {0} "{1}"'.format(arguments, work_dir) log.info(cmd_line) return subprocess.check_call(cmd_line, shell=True)
Converts all DICOM files within `work_dir` into one or more NifTi files by calling dcm2nii on this folder. Parameters ---------- work_dir: str Path to the folder that contain the DICOM files arguments: str String containing all the flag arguments for `dcm2nii` CLI. Returns ------- sys_code: int dcm2nii execution return code
def to_float(option, value):
    if type(value) is str:
        try:
            value = float(value)
        except ValueError:
            pass
    return (option, value)
Converts string values to floats when appropriate
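Two quick examples of the coercion behaviour:

>>> to_float('threshold', '0.5')
('threshold', 0.5)
>>> to_float('name', 'abc')  # non-numeric strings pass through unchanged
('name', 'abc')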
def print_boggle(board): "Print the board in a 2-d array." n2 = len(board); n = exact_sqrt(n2) for i in range(n2): if i % n == 0 and i > 0: print if board[i] == 'Q': print 'Qu', else: print str(board[i]) + ' ', print
Print the board in a 2-d array.
def backend_version(backend, childprocess=None): if childprocess is None: childprocess = childprocess_default_value() if not childprocess: return _backend_version(backend) else: return run_in_childprocess(_backend_version, None, backend)
Back-end version. :param backend: back-end (examples:scrot, wx,..) :param childprocess: see :py:func:`grab` :return: version as string
def make_present_participles(verbs):
    res = []
    for verb in verbs:
        parts = verb.split()
        if parts[0].endswith("e"):
            parts[0] = parts[0][:-1] + "ing"
        else:
            parts[0] = parts[0] + "ing"
        res.append(" ".join(parts))
    return res
Make the list of verbs into present participles E.g.: empower -> empowering drive -> driving
def active_env_module_resolver(resolver, path): from .api import get_active_env env = get_active_env() if not env: raise ResolveError mod = env.get_module(path) if not mod: raise ResolveError return mod
Resolves modules in currently active environment.
def execute_sync(self, message): info("synchronizing message: {message}") with self.world._unlock_temporarily(): message._sync(self.world) self.world._react_to_sync_response(message) for actor in self.actors: actor._react_to_sync_response(message)
Respond when the server indicates that the client is out of sync. The server can request a sync when this client sends a message that fails the check() on the server. If the reason for the failure isn't very serious, then the server can decide to send it as usual in the interest of a smooth gameplay experience. When this happens, the server sends out an extra response providing the clients with the information they need to resync themselves.
def _concat_queries(queries, operators='__and__'): if not queries: raise ValueError('Expected some `queries`, got {}.'.format(queries)) if len(queries) == 1: return queries[0] if isinstance(operators, str): operators = [operators] * (len(queries) - 1) if len(queries) - 1 != len(operators): raise ValueError('Expected `operators` to be a string or a list with the same' ' length as `field_names` ({}), got {}.'.format(len(queries), operators)) first, rest, end = queries[0], queries[1:-1], queries[-1:][0] bigop = getattr(first, operators[0]) for i, q in enumerate(rest): bigop = getattr(bigop(q), operators[i]) return bigop(end)
Create a tinyDB Query object that is the concatenation of each query in `queries`. The concatenation operator is taken from `operators`. Parameters ---------- queries: list of tinydb.Query The list of tinydb.Query to be joined. operators: str or list of str List of binary operators to join `queries` into one query. Check TinyDB.Query class for possible choices. Returns ------- query: tinydb.database.Query
def file_supported(cls, filename): if not isinstance(filename, str): return False (_, ext) = os.path.splitext(filename) if ext not in cls.extensions: return False else: return True
Returns a boolean indicating whether the filename has an appropriate extension for this class.
def invoke_hook_spout_fail(self, message_id, fail_latency_ns): if len(self.task_hooks) > 0: spout_fail_info = SpoutFailInfo(message_id=message_id, spout_task_id=self.get_task_id(), fail_latency_ms=fail_latency_ns * system_constants.NS_TO_MS) for task_hook in self.task_hooks: task_hook.spout_fail(spout_fail_info)
Invoke task hooks every time the spout fails a tuple. :type message_id: str :param message_id: message id to which a failed tuple was anchored :type fail_latency_ns: float :param fail_latency_ns: fail latency in nanoseconds
def write_extracted_licenses(lics, out): write_value('LicenseID', lics.identifier, out) if lics.full_name is not None: write_value('LicenseName', lics.full_name, out) if lics.comment is not None: write_text_value('LicenseComment', lics.comment, out) for xref in sorted(lics.cross_ref): write_value('LicenseCrossReference', xref, out) write_text_value('ExtractedText', lics.text, out)
Write extracted licenses fields to out.
def run_splitted_processing(max_simultaneous_processes, process_name, filenames): pids = [] while len(filenames) > 0: while len(filenames) > 0 and len(pids) < max_simultaneous_processes: filename = filenames.pop() pids.append(service_start(service=process_name, param=['-f', filename, '-d', imported_day])) while len(pids) == max_simultaneous_processes: time.sleep(sleep_timer) pids = update_running_pids(pids) while len(pids) > 0: time.sleep(sleep_timer) pids = update_running_pids(pids)
Run processes which push the RIPE routing dump into a Redis database. The dump has been split into multiple files, and each process runs on one of these files.
def bdp(tickers, flds, **kwargs): logger = logs.get_logger(bdp, level=kwargs.pop('log', logs.LOG_LEVEL)) con, _ = create_connection() ovrds = assist.proc_ovrds(**kwargs) logger.info( f'loading reference data from Bloomberg:\n' f'{assist.info_qry(tickers=tickers, flds=flds)}' ) data = con.ref(tickers=tickers, flds=flds, ovrds=ovrds) if not kwargs.get('cache', False): return [data] qry_data = [] for r, snap in data.iterrows(): subset = [r] data_file = storage.ref_file( ticker=snap.ticker, fld=snap.field, ext='pkl', **kwargs ) if data_file: if not files.exists(data_file): qry_data.append(data.iloc[subset]) files.create_folder(data_file, is_file=True) data.iloc[subset].to_pickle(data_file) return qry_data
Bloomberg reference data Args: tickers: tickers flds: fields to query **kwargs: bbg overrides Returns: pd.DataFrame Examples: >>> bdp('IQ US Equity', 'Crncy', raw=True) ticker field value 0 IQ US Equity Crncy USD >>> bdp('IQ US Equity', 'Crncy').reset_index() ticker crncy 0 IQ US Equity USD
def next_item(self): queue = self.queue try: item = queue.get(block=True, timeout=5) return item except Exception: return None
Get a single item from the queue.
def _run_gevent(app, config, mode): import gevent import gevent.monkey gevent.monkey.patch_all() from gevent.pywsgi import WSGIServer server_args = { "bind_addr": (config["host"], config["port"]), "wsgi_app": app, "keyfile": None, "certfile": None, } protocol = "http" server_args.update(config.get("server_args", {})) dav_server = WSGIServer(server_args["bind_addr"], app) _logger.info("Running {}".format(dav_server)) _logger.info( "Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"]) ) try: gevent.spawn(dav_server.serve_forever()) except KeyboardInterrupt: _logger.warning("Caught Ctrl-C, shutting down...") return
Run WsgiDAV using gevent if gevent is installed. See https://github.com/gevent/gevent/blob/master/src/gevent/pywsgi.py#L1356 https://github.com/gevent/gevent/blob/master/src/gevent/server.py#L38 for more options
def on_exit_stage(self): self.forum.on_finish_game() for actor in self.actors: actor.on_finish_game() with self.world._unlock_temporarily(): self.world.on_finish_game()
Give the actors, the world, and the messaging system a chance to react to the end of the game.
def spin_up_instance(self, command, job_name): command = Template(template_string).substitute(jobname=job_name, user_script=command, linger=str(self.linger).lower(), worker_init=self.worker_init) instance_type = self.instance_type subnet = self.sn_ids[0] ami_id = self.image_id total_instances = len(self.instances) if float(self.spot_max_bid) > 0: spot_options = { 'MarketType': 'spot', 'SpotOptions': { 'MaxPrice': str(self.spot_max_bid), 'SpotInstanceType': 'one-time', 'InstanceInterruptionBehavior': 'terminate' } } else: spot_options = {} if total_instances > self.max_nodes: logger.warn("Exceeded instance limit ({}). Cannot continue\n".format(self.max_nodes)) return [None] try: tag_spec = [{"ResourceType": "instance", "Tags": [{'Key': 'Name', 'Value': job_name}]}] instance = self.ec2.create_instances( MinCount=1, MaxCount=1, InstanceType=instance_type, ImageId=ami_id, KeyName=self.key_name, SubnetId=subnet, SecurityGroupIds=[self.sg_id], TagSpecifications=tag_spec, InstanceMarketOptions=spot_options, InstanceInitiatedShutdownBehavior='terminate', IamInstanceProfile={'Arn': self.iam_instance_profile_arn}, UserData=command ) except ClientError as e: print(e) logger.error(e.response) return [None] except Exception as e: logger.error("Request for EC2 resources failed : {0}".format(e)) return [None] self.instances.append(instance[0].id) logger.info( "Started up 1 instance {} . Instance type:{}".format(instance[0].id, instance_type) ) return instance
Start an instance in the VPC in the first available subnet. N instances will be started if nodes_per_block > 1. Not supported. We only do 1 node per block. Parameters ---------- command : str Command string to execute on the node. job_name : str Name associated with the instances.
def sort_by_modified(files_or_folders: list) -> list:
    return sorted(files_or_folders, key=os.path.getmtime, reverse=True)
Sort files or folders by modified time Args: files_or_folders: list of files or folders Returns: list
def contains_ignoring_case(self, *items): if len(items) == 0: raise ValueError('one or more args must be given') if isinstance(self.val, str_types): if len(items) == 1: if not isinstance(items[0], str_types): raise TypeError('given arg must be a string') if items[0].lower() not in self.val.lower(): self._err('Expected <%s> to case-insensitive contain item <%s>, but did not.' % (self.val, items[0])) else: missing = [] for i in items: if not isinstance(i, str_types): raise TypeError('given args must all be strings') if i.lower() not in self.val.lower(): missing.append(i) if missing: self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing))) elif isinstance(self.val, Iterable): missing = [] for i in items: if not isinstance(i, str_types): raise TypeError('given args must all be strings') found = False for v in self.val: if not isinstance(v, str_types): raise TypeError('val items must all be strings') if i.lower() == v.lower(): found = True break if not found: missing.append(i) if missing: self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing))) else: raise TypeError('val is not a string or iterable') return self
Asserts that val is a string or an iterable of strings and contains the given item or items, ignoring case.
def expose_ancestors_or_children(self, member, collection, lang=None): x = { "id": member.id, "label": str(member.get_label(lang)), "model": str(member.model), "type": str(member.type), "size": member.size, "semantic": self.semantic(member, parent=collection) } if isinstance(member, ResourceCollection): x["lang"] = str(member.lang) return x
Build an ancestor or descendant dict view based on selected information :param member: Current Member to build for :param collection: Collection from which we retrieved it :param lang: Language to express data in :return:
def get_mongoadmins(self): apps = [] for app_name in settings.INSTALLED_APPS: mongoadmin = "{0}.mongoadmin".format(app_name) try: module = import_module(mongoadmin) except ImportError as e: if str(e).startswith("No module named"): continue raise e app_store = AppStore(module) apps.append(dict( app_name=app_name, obj=app_store )) return apps
Returns a list of all mongoadmin implementations for the site
def move_next_to(self, body_a, body_b, offset_a, offset_b): ba = self.get_body(body_a) bb = self.get_body(body_b) if ba is None: return bb.relative_offset_to_world(offset_b) if bb is None: return ba.relative_offset_to_world(offset_a) anchor = ba.relative_offset_to_world(offset_a) offset = bb.relative_offset_to_world(offset_b) bb.position = bb.position + anchor - offset return anchor
Move one body to be near another one. After moving, the location described by ``offset_a`` on ``body_a`` will be coincident with the location described by ``offset_b`` on ``body_b``. Parameters ---------- body_a : str or :class:`Body` The body to use as a reference for moving the other body. If this is a string, it is treated as the name of a body to look up in the world. body_b : str or :class:`Body` The body to move next to ``body_a``. If this is a string, it is treated as the name of a body to look up in the world. offset_a : 3-tuple of float The offset of the anchor point, given as a relative fraction of the size of ``body_a``. See :func:`Body.relative_offset_to_world`. offset_b : 3-tuple of float The offset of the anchor point, given as a relative fraction of the size of ``body_b``. Returns ------- anchor : 3-tuple of float The location of the shared point, which is often useful to use as a joint anchor.
def get_exception_from_status_and_error_codes(status_code, error_code, value): if status_code == requests.codes.bad_request: exception = BadRequest(value) elif status_code == requests.codes.unauthorized: exception = Unauthorized(value) elif status_code == requests.codes.forbidden: exception = Unauthorized(value) elif status_code in [requests.codes.not_found, requests.codes.gone]: exception = NotFound(value) elif status_code == requests.codes.method_not_allowed: exception = MethodNotAllowed(value) elif status_code >= requests.codes.bad_request: exception = HTTPError(value) else: exception = ResponseError(value) if error_code == -100: exception = InternalError(value) elif error_code == -101: exception = InvalidToken(value) elif error_code == -105: exception = UploadFailed(value) elif error_code == -140: exception = UploadTokenGenerationFailed(value) elif error_code == -141: exception = InvalidUploadToken(value) elif error_code == -150: exception = InvalidParameter(value) elif error_code == -151: exception = InvalidPolicy(value) return exception
Return an exception given status and error codes. :param status_code: HTTP status code. :type status_code: None | int :param error_code: Midas Server error code. :type error_code: None | int :param value: Message to display. :type value: string :returns: Exception. :rtype : pydas.exceptions.ResponseError
def add_example(self, example):
    "Add an example to the list of examples, checking it first."
    self.check_example(example)
    self.examples.append(example)
Add an example to the list of examples, checking it first.
def removeAllRecords(self):
    for field in self.fields:
        field.encodings, field.values = [], []
        field.numRecords, field.numEncodings = (0, 0)
Deletes all the values in the dataset
async def get_size(media): if hasattr(media, 'seek'): await execute(media.seek(0, os.SEEK_END)) size = await execute(media.tell()) await execute(media.seek(0)) elif hasattr(media, 'headers'): size = int(media.headers['Content-Length']) elif isinstance(media, bytes): size = len(media) else: raise TypeError("Can't get size of media of type:", type(media).__name__) _logger.info("media size: %dB" % size) return size
Get the size of a file Parameters ---------- media : file object The file object of the media Returns ------- int The size of the file
def set_color_list(self, color_list, offset=0): if not len(color_list): return color_list = make.colors(color_list) size = len(self._colors) - offset if len(color_list) > size: color_list = color_list[:size] self._colors[offset:offset + len(color_list)] = color_list
Set the internal colors starting at an optional offset. If `color_list` is a list or other 1-dimensional array, it is reshaped into an N x 3 list. If `color_list` is too long, it is truncated; if it is too short, then only the initial colors are set.
def find(self, y): node = self.root while True: edge = self._edgeLabel(node, node.parent) if edge.startswith(y): return node.idx i = 0 while(i < len(edge) and edge[i] == y[0]): y = y[1:] i += 1 if i != 0: if i == len(edge) and y != '': pass else: return -1 node = node._get_transition_link(y[0]) if not node: return -1
Returns starting position of the substring y in the string used for building the Suffix tree. :param y: String :return: Index of the starting position of string y in the string used for building the Suffix tree -1 if y is not a substring.
def add_item(self, jid, node=None, name=None, action=None):
    return DiscoItem(self, jid, node, name, action)
Add a new item to the `DiscoItems` object. :Parameters: - `jid`: item JID. - `node`: item node name. - `name`: item name. - `action`: action for a "disco push". :Types: - `jid`: `pyxmpp.JID` - `node`: `unicode` - `name`: `unicode` - `action`: `unicode` :returns: the item created. :returntype: `DiscoItem`.