query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
run function my_squares and my_join
def run_my_funcs(x,y): print(x,y) my_squares(x) my_join(x,y) return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_my_funcs(x, y):\n print\n my_squares(x)\n my_join(x, y)\n return 0", "def double_chop_pairs(\n x1, y1, z1, w1, cell1, x2, y2, z2, w2, indx2, rbins_squared, result):\n start = cuda.grid(1)\n stride = cuda.gridsize(1)\n\n n1 = x1.shape[0]\n nbins = rbins_squared.shape[0]\n\n ...
[ "0.7624191", "0.5538901", "0.5165043", "0.50714666", "0.50639325", "0.5047271", "0.5002455", "0.49778813", "0.4968627", "0.49685135", "0.495568", "0.49273586", "0.48960102", "0.48864695", "0.48659024", "0.48147792", "0.48095623", "0.48085663", "0.4803549", "0.4795789", "0.479...
0.7545948
2
Adds the keys 'logits' and 'probs' to the end points dictionary of ResNet50v2.
def _get_updated_endpoints(original_end_points, name): end_points = dict(original_end_points) end_points['logits'] = tf.squeeze(end_points[name], [1, 2]) end_points['probs'] = tf.nn.softmax(end_points['logits']) return end_points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }\n n2 = { 'edges': [ n1, self.next_insert ], 'pred': n1 }\n self.next_insert['pred'] = n2\n self.next_insert = n2\n self.nodect += 2", "def init_output_dict(se...
[ "0.52572966", "0.52147967", "0.5190277", "0.5172461", "0.5160112", "0.5107622", "0.5044875", "0.502481", "0.49618483", "0.49577186", "0.49346328", "0.4871828", "0.47935718", "0.47819144", "0.47578776", "0.4739234", "0.47227845", "0.46987733", "0.46696952", "0.46637616", "0.46...
0.56161755
0
Initializes the tensorflow graph for the ResNet50v2 model.
def __init__(self, x, num_classes=15, is_training=False): super(resnet_v2_50, self).__init__() self.x = x self.num_classes = num_classes # populating the tensorflow graph with slim.arg_scope(arg_scopes_map['resnet_v2_50']()): net, end_points = networks_map['resnet_v2_50']( x, num_classes=num_classes, is_training=is_training, reuse=None) self.end_points = _get_updated_endpoints(end_points, 'resnet_v2_50/logits') self.variables_to_restore = slim.get_variables_to_restore(exclude=[])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def build_graph(self):\n ...
[ "0.71776015", "0.6739433", "0.6669668", "0.660558", "0.65882456", "0.6516973", "0.6443085", "0.6423372", "0.64015436", "0.63999057", "0.6339511", "0.63393587", "0.6328483", "0.631329", "0.63033473", "0.62763274", "0.62757164", "0.627243", "0.6259759", "0.6259759", "0.6252365"...
0.6228969
23
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None): if sess is None: sess = tf.get_default_session() assert sess is not None saver = tf.train.Saver(self.variables_to_restore) saver.restore(sess, checkpoint_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model_weights(sess, checkpoint_dir):\n\n init_fn = slim.assign_from_checkpoint_fn(\n checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)\n init_fn(sess)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n ...
[ "0.76077384", "0.74392045", "0.74392045", "0.7414661", "0.7232018", "0.71317524", "0.7071087", "0.70339084", "0.700918", "0.6977498", "0.6935631", "0.691333", "0.68927085", "0.68749905", "0.68339276", "0.6816416", "0.6816042", "0.67910594", "0.6769799", "0.6769098", "0.672785...
0.77680707
2
Lists the model's parameters.
def get_params(self): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def getListOfParameters(self, *args):\n return _libsbml.Model_getListOfParameters(self, *args)", "def parameters(self):\n return self.model.parameters()", "def parameters(self):\n return []", "def print_...
[ "0.80538356", "0.75631976", "0.72960615", "0.7105871", "0.7048238", "0.6971301", "0.6963369", "0.6942364", "0.69122803", "0.6896806", "0.6824722", "0.6821565", "0.676841", "0.67298985", "0.67298985", "0.67248833", "0.6719116", "0.6717513", "0.6717513", "0.67164814", "0.671245...
0.61706924
91
Exposes all the layers of the model.
def fprop(self, x): if x is self.x: return self.end_points else: with slim.arg_scope(arg_scopes_map['resnet_v2_50']()): net, end_points = networks_map['resnet_v2_50']( x, num_classes=self.num_classes, is_training=False, reuse=tf.AUTO_REUSE) return _get_updated_endpoints(end_points, 'resnet_v2_50/logits')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def layers(self): # -> LayerView:\n ...", "def build_layers(self):\n raise NotImplementedError", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def _g...
[ "0.72325057", "0.6624318", "0.62856334", "0.62295145", "0.6214809", "0.6171816", "0.60817003", "0.5990596", "0.59697384", "0.5955498", "0.5902771", "0.58940166", "0.5883603", "0.5861259", "0.5861006", "0.5847972", "0.5845203", "0.5828398", "0.5788822", "0.5782679", "0.5767099...
0.0
-1
Initializes the tensorflow graph for the ResNet50v2 model.
def __init__(self, x, num_classes=15, is_training=False): super(resnet_v2_152, self).__init__() self.x = x self.num_classes = num_classes # populating the tensorflow graph with slim.arg_scope(arg_scopes_map['resnet_v2_152']()): net, end_points = networks_map['resnet_v2_152']( x, num_classes=num_classes, is_training=is_training, reuse=None) self.end_points = _get_updated_endpoints(end_points, 'resnet_v2_152/logits') self.variables_to_restore = slim.get_variables_to_restore(exclude=[])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def build_graph(self):\n ...
[ "0.71776015", "0.6739433", "0.6669668", "0.660558", "0.65882456", "0.6516973", "0.6443085", "0.6423372", "0.64015436", "0.63999057", "0.6339511", "0.63393587", "0.6328483", "0.631329", "0.63033473", "0.62763274", "0.62757164", "0.627243", "0.6259759", "0.6259759", "0.6252365"...
0.0
-1
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None): if sess is None: sess = tf.get_default_session() assert sess is not None saver = tf.train.Saver(self.variables_to_restore) saver.restore(sess, checkpoint_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model_weights(sess, checkpoint_dir):\n\n init_fn = slim.assign_from_checkpoint_fn(\n checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)\n init_fn(sess)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n ...
[ "0.76077384", "0.74392045", "0.74392045", "0.7414661", "0.7232018", "0.71317524", "0.7071087", "0.70339084", "0.700918", "0.6977498", "0.6935631", "0.691333", "0.68927085", "0.68749905", "0.68339276", "0.6816416", "0.6816042", "0.67910594", "0.6769799", "0.6769098", "0.672785...
0.77680707
1
Lists the model's parameters.
def get_params(self): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def getListOfParameters(self, *args):\n return _libsbml.Model_getListOfParameters(self, *args)", "def parameters(self):\n return self.model.parameters()", "def parameters(self):\n return []", "def print_...
[ "0.80538356", "0.75631976", "0.72960615", "0.7105871", "0.7048238", "0.6971301", "0.6963369", "0.6942364", "0.69122803", "0.6896806", "0.6824722", "0.6821565", "0.676841", "0.67298985", "0.67298985", "0.67248833", "0.6719116", "0.6717513", "0.6717513", "0.67164814", "0.671245...
0.61706924
90
Exposes all the layers of the model.
def fprop(self, x): if x is self.x: return self.end_points else: with slim.arg_scope(arg_scopes_map['resnet_v2_152']()): net, end_points = networks_map['resnet_v2_152']( x, num_classes=self.num_classes, is_training=False, reuse=tf.AUTO_REUSE) return _get_updated_endpoints(end_points, 'resnet_v2_152/logits')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def layers(self): # -> LayerView:\n ...", "def build_layers(self):\n raise NotImplementedError", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def _g...
[ "0.72325057", "0.6624318", "0.62856334", "0.62295145", "0.6214809", "0.6171816", "0.60817003", "0.5990596", "0.59697384", "0.5955498", "0.5902771", "0.58940166", "0.5883603", "0.5861259", "0.5861006", "0.5847972", "0.5845203", "0.5828398", "0.5788822", "0.5782679", "0.5767099...
0.0
-1
Initializes the tensorflow graph for the ResNet50v2 model.
def __init__(self, x, num_classes=15, is_training=False): super(mobilenet_v2, self).__init__() self.x = x self.num_classes = num_classes # populating the tensorflow graph with slim.arg_scope(arg_scopes_map['mobilenet_v2']()): net, end_points = networks_map['mobilenet_v2']( x, num_classes=num_classes, is_training=is_training, reuse=None) self.end_points = _get_updated_endpoints(end_points, 'Logits') self.variables_to_restore = slim.get_variables_to_restore(exclude=[])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def build_graph(self):\n ...
[ "0.71776015", "0.6739433", "0.6669668", "0.660558", "0.65882456", "0.6516973", "0.6443085", "0.6423372", "0.64015436", "0.63999057", "0.6339511", "0.63393587", "0.6328483", "0.631329", "0.63033473", "0.62763274", "0.62757164", "0.627243", "0.6259759", "0.6259759", "0.6252365"...
0.0
-1
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None): if sess is None: sess = tf.get_default_session() assert sess is not None saver = tf.train.Saver(self.variables_to_restore) saver.restore(sess, checkpoint_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model_weights(sess, checkpoint_dir):\n\n init_fn = slim.assign_from_checkpoint_fn(\n checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)\n init_fn(sess)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n ...
[ "0.76077384", "0.74392045", "0.74392045", "0.7414661", "0.7232018", "0.71317524", "0.7071087", "0.70339084", "0.700918", "0.6977498", "0.6935631", "0.691333", "0.68927085", "0.68749905", "0.68339276", "0.6816416", "0.6816042", "0.67910594", "0.6769799", "0.6769098", "0.672785...
0.77680707
0
Lists the model's parameters.
def get_params(self): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def getListOfParameters(self, *args):\n return _libsbml.Model_getListOfParameters(self, *args)", "def parameters(self):\n return self.model.parameters()", "def parameters(self):\n return []", "def print_...
[ "0.80538356", "0.75631976", "0.72960615", "0.7105871", "0.7048238", "0.6971301", "0.6963369", "0.6942364", "0.69122803", "0.6896806", "0.6824722", "0.6821565", "0.676841", "0.67298985", "0.67298985", "0.67248833", "0.6719116", "0.6717513", "0.6717513", "0.67164814", "0.671245...
0.61706924
92
Exposes all the layers of the model.
def fprop(self, x): if x is self.x: return self.end_points else: with slim.arg_scope(arg_scopes_map['mobilenet_v2']()): net, end_points = networks_map['mobilenet_v2']( x, num_classes=self.num_classes, is_training=False, reuse=tf.AUTO_REUSE) return _get_updated_endpoints(end_points, 'Logits')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def layers(self): # -> LayerView:\n ...", "def build_layers(self):\n raise NotImplementedError", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def _g...
[ "0.72325057", "0.6624318", "0.62856334", "0.62295145", "0.6214809", "0.6171816", "0.60817003", "0.5990596", "0.59697384", "0.5955498", "0.5902771", "0.58940166", "0.5883603", "0.5861259", "0.5861006", "0.5847972", "0.5845203", "0.5828398", "0.5788822", "0.5782679", "0.5767099...
0.0
-1
The top level function.
def eval_tree(tree): global genv global result # Here, get the list of children nodes. Iterate over that list, calling eval_node on each node. for node in tree.body: val = eval_node(node, genv) result = val[0] genv = val[1] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n p...
[ "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464", "0.69298464"...
0.0
-1
Evaluates a Node object in the abstract syntax tree.
def eval_node(node, env): global genv global result node_type = node_name(node) if node_type == 'Expr': return eval_node(node.value, env) elif node_type == 'Assign': val = eval_node(node.value, env) while type(val) is tuple and len(val) == 2 and (type(val[1]) == GlobalEnv or type(val[1]) == LocalEnv): val = val[0] # extract the variable name, evaluate the RHS, then extend the environment. return 0, env.extend([node.targets[0].id], [val]) elif node_type == 'BinOp': # get the left and right operands (we use only single operands) and the operator. # evaluate the operands and apply the operator. return the number, env. left = eval_node(node.left, env)[0] right = eval_node(node.right, env)[0] left = left[0] if type(left) is tuple else left right = right[0] if type(right) is tuple else right op = node_name(node.op) if op == "Add": return (left + right), env elif op == "Sub": return (left - right), env elif op == "Mult": return (left * right), env elif op == "Div": return (left / right), env elif op == "Mod": return (left % right), env return 0, env elif node_type == 'FunctionDef': # need the function id (name), args, and body. Extend the environment. # you can leave the args wrapped in the ast class and the body and unpack them # when the function is called. return 0, env.extend([node.name], [(node.args, node.body)]) elif node_type == 'Call': # get any values passed in to the function from the Call object. # get the fxn name and look up its parameters, if any, and body from the env. # get lists for parameter names and values and extend a LocalEnv with those bindings. # evaluate the body in the local env, return the value, env. 
func = eval_node(node.func, env)[0] local_env = LocalEnv(None, env) args = func[0].args body = func[1] index = 0 for val in node.args: local_env = local_env.extend([args[index].arg], [eval_node(val, local_env)[0]]) index += 1 for node in body: val = eval_node(node, local_env) if node_name(node) == "Return": output_val = val[0] local_env = val[1] return output_val, env elif node_type == 'Return': # evaluate the node, return the value, env. return eval_node(node.value, env) elif node_type == 'Name': # Name(identifier id)- lookup the value binding in the env # return the value, env return env.lookup(node.id), env # Num(object n) -- a number, return the number, env. elif node_type == 'Num': return node.n, env
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(self, tree):\n\t\tpass", "def eval(self, node):\n\n return None", "def eval(self):\n return self._eval_node(self.syntax_tree)", "def eval(node):\n if node.id == '(literal)':\n return node.value\n elif node.id == '(name)':\n return scope[node.value]\n elif nod...
[ "0.7579689", "0.7281968", "0.7104537", "0.65870726", "0.63241583", "0.62537116", "0.62466973", "0.6190615", "0.6190615", "0.6085275", "0.60787815", "0.6074576", "0.60320526", "0.6001459", "0.59816545", "0.5969225", "0.5951202", "0.5924543", "0.59197736", "0.5908411", "0.59069...
0.63417566
4
Warn about unused static variables.
def _find_unused_static_warnings(filename, lines, ast_list): static_declarations = { node.name: node for node in ast_list if (isinstance(node, ast.VariableDeclaration) and 'static' in node.type.modifiers) } def find_variables_use(body): for child in body: if child.name in static_declarations: static_use_counts[child.name] += 1 static_use_counts = collections.Counter() for node in ast_list: if isinstance(node, ast.Function) and node.body: find_variables_use(node.body) elif isinstance(node, ast.Class) and node.body: for child in node.body: if isinstance(child, ast.Function) and child.body: find_variables_use(child.body) for name in sorted(static_declarations): if not static_use_counts[name]: print("{}:{}: unused variable '{}'".format( filename, lines.get_line_number(static_declarations[name].start), name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unusedVars(self):\n fullcode = self.code_cfg\n variables = set([x[1:] for x in codeconfig_getvars(fullcode)])\n exceptions = set(['complexity', 'code_cfg'])\n clsvars = set(vars(self).keys())\n nones = set(filter(lambda x: self.__dict__[x] is None, clsvars))\n nones = ...
[ "0.6999553", "0.684138", "0.61850595", "0.6036887", "0.58252496", "0.57561684", "0.5704306", "0.56855494", "0.56610996", "0.56531423", "0.5616008", "0.55835503", "0.5551007", "0.55266124", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.550777...
0.7391628
0
Implemented the validation step in training.
def validate(self): # start validate self.model.eval() preds, labels = [], [] for batch_idx, data in enumerate(self.valid_dataloader): # calculate and log losses losses_report, valid_preds, valid_labels = self.forward_one_batch( data) self._update_losses(losses_report, train=False) preds.append(valid_preds) labels.append(valid_labels) preds = np.concatenate(preds, axis=0) labels = np.concatenate(labels, axis=0) if IS_REG: preds = disc(preds) # calculate and log metrics metrics_report = self.evaluate_metrics(preds, labels) self._update_metrics(metrics_report, train=False) # TODO: lr scheduler step setting self.lr_scheduler.step(self.valid_loss_meters['CrossEntropyLoss'].avg) # end validate self.model.train()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validation_epoch(self):\n self.model.eval()\n\n # Compute for training set\n train_loss, train_acc = compute_loss_and_accuracy(\n self.dataloader_train, self.model, self.loss_criterion\n )\n self.TRAIN_ACC.append(train_acc)\n self.TRAIN_LOSS.append(train_los...
[ "0.7692694", "0.75106955", "0.72990733", "0.7281097", "0.7261256", "0.7184241", "0.7167047", "0.71011925", "0.7094827", "0.7077488", "0.7063481", "0.70511085", "0.7027731", "0.69944483", "0.6994215", "0.6989954", "0.6987662", "0.6927485", "0.6925709", "0.69221634", "0.6896365...
0.747869
2
Train the model for one epoch.
def fit_one_epoch(self): preds, labels = [], [] for batch_idx, data in tqdm(enumerate(self.primary_dataloader)): losses_report, train_preds, train_labels = self.forward_one_batch( data) preds.append(train_preds) labels.append(train_labels) self._optimize(losses_report) self._update_losses(losses_report, train=True) self.iter += 1 # log/check point with torch.no_grad(): if self.iter % self.log_iter == 0: # TODO: track train preds = np.concatenate(preds, axis=0) labels = np.concatenate(labels, axis=0) if IS_REG: preds = disc(preds) metrics_report = self.evaluate_metrics(preds, labels) self._update_metrics(metrics_report, train=True) preds, labels = [], [] if self.valid_dataloader: self.validate() self.log_meters() self.save_checkpoint() self.reset_meters()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_one_epoch(self):\n raise NotImplementedError", "def train_one_epoch(self):\n\t\tself.model.train()\n\t\ttrain_loss = 0\n\n\t\tfor batch_idx, data in enumerate(self.data_loader.train_loader):\n\t\t\tInput = data[0].float().to(self.device)\n\t\t\tOutput = data[1].float().to(self.device)\n\n\t\t\ts...
[ "0.84007376", "0.82546407", "0.7960908", "0.795539", "0.78864366", "0.7852404", "0.7852404", "0.7852404", "0.7852404", "0.7800227", "0.7787899", "0.7712295", "0.7680872", "0.76052165", "0.75889987", "0.75747776", "0.7558251", "0.7551875", "0.7530717", "0.75160545", "0.751391"...
0.71643716
58
Forward pass one batch of the data with the model.
def forward_one_batch(self, data, inference=False): inputs = data['img'] labels = data.get('label', None) inputs = inputs.cuda() outputs = self.model(inputs) losses_report = None if not inference: labels = labels.cuda() losses_report = self.compute_losses(outputs, labels) return losses_report, outputs.detach().cpu().numpy(), labels.detach( ).cpu().numpy() if labels is not None else labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def forward_batch(self,batcher, phase=0):\n pass", "def forward(self, x):\n #batch_size = x.shape[0]\n out = self.model(x)\n return out", "def forward_...
[ "0.83496284", "0.83496284", "0.76277405", "0.7380476", "0.6854552", "0.6846691", "0.68106997", "0.67724216", "0.67681694", "0.673687", "0.67066", "0.665859", "0.66289693", "0.65885663", "0.65885663", "0.65885663", "0.6574135", "0.65682745", "0.6561686", "0.6558493", "0.654809...
0.6389994
35
Save the training checkpoint to the disk.
def save_checkpoint(self): if not self.save_ckpt: return lookup = None is_best = False checkpoint = self.create_checkpoint() # save best only or not? if self.save_best_only: if self.valid_dataloader: for item in [self.valid_metric_meters, self.valid_loss_meters]: if self.primary_indicator in item: lookup = item else: for item in [self.train_metric_meters, self.train_loss_meters]: if self.primary_indicator in item: lookup = item if lookup: value = lookup[self.primary_indicator].avg if self.best_mode == 'min': if value < self.best_indicator: self.best_indicator = value is_best = True else: if value > self.best_indicator: self.best_indicator = value is_best = True # TODO: better naming convention if self.valid_dataloader: metric_string = '-'.join([ f'{metric}-[{self.valid_metric_meters[metric].avg:.5f}]' for metric in self.valid_metric_meters ]) loss_string = '-'.join([ f'{loss}-[{self.valid_loss_meters[loss].avg:.5f}]' for loss in self.valid_loss_meters ]) else: metric_string = '-'.join([ f'{metric}-[{self.train_metric_meters[metric].avg:.5f}]' for metric in self.train_metric_meters ]) loss_string = '-'.join([ f'{loss}-[{self.train_loss_meters[loss].avg:.5f}]' for loss in self.train_loss_meters ]) # TODO: use config for paths # make subdir folder = Path(self.save_path, str(self.fold_idx)) folder.mkdir(parents=True, exist_ok=True) if not self.save_best_only or (self.save_best_only and is_best): torch.save(checkpoint, f'{folder}/ep-[{self.epoch}]-iter-[{self.iter}]-{loss_string}-{metric_string}.pth')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_checkpoint(self):\n \n if not os.path.isdir(self.path + '/checkpoint/'):\n os.makedirs(self.path + '/checkpoint/')\n\n if self.saver == None:\n with self.graph.as_default():\n self.saver = tf.train.Saver(tf.global_variables())\n\n self.saver...
[ "0.8523187", "0.82452935", "0.8190791", "0.8153993", "0.81345206", "0.812798", "0.8117262", "0.80906636", "0.80737823", "0.79668844", "0.79027486", "0.77434766", "0.77273154", "0.7645351", "0.764001", "0.76386636", "0.7630027", "0.76060045", "0.75488526", "0.75404406", "0.750...
0.727966
43
max is 5 per city instead of like 6, 12 or 10 =(round((1 + (0.5(B41 1) / 5))B410.25, 2))12 so i think we take this formula and change the 5 to 4
def calculate_production_bonus(self, number_of_improvements, max_slots): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_score(location_list, grid, shape):", "def get_max(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n maximum = df.max(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n maximum = round(float(maximum), 4)\r\n re...
[ "0.61874086", "0.59167874", "0.58895576", "0.5768458", "0.57580644", "0.5663183", "0.5629974", "0.5617509", "0.557116", "0.55564797", "0.5527265", "0.55069757", "0.55021864", "0.5498278", "0.54592294", "0.5444004", "0.54354906", "0.5419152", "0.5413824", "0.539782", "0.538697...
0.0
-1
Given a query, and an update clause, update all (and only) object returned by query.
def test_update(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, **kwargs):\n expr = self.model.__table__.update().where(self.query).values(**kwargs)\n return self._runquery(expr)", "def update(self, **kwargs):\n self._not_support_combined_queries(\"update\")\n if self.query.is_sliced:\n raise TypeError(\"Cannot update a...
[ "0.6956026", "0.6809811", "0.67811257", "0.6622099", "0.6616726", "0.6503182", "0.6379676", "0.63170165", "0.62703687", "0.62077737", "0.6169037", "0.61661786", "0.61275154", "0.6101097", "0.60837984", "0.6061122", "0.60530907", "0.6043129", "0.60077256", "0.5953688", "0.5947...
0.0
-1
Given a primary key, update the referenced object according to the update clause
def test_update_one(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(cls, row_id, **kwargs):\n cls.delete(row_id)\n # obj = cls.query.filter_by(id=row_id).first()\n # for k, v in kwargs.items():\n # obj[k] = v\n # obj = cls.query.filter_by(id=row_id).update(kwargs)\n kwargs[\"id\"] = row_id\n obj = cls(**kwargs)\n ...
[ "0.686397", "0.6536987", "0.6483919", "0.6443422", "0.64308685", "0.63702625", "0.6346544", "0.6217846", "0.6217339", "0.61647564", "0.611606", "0.6064789", "0.6061465", "0.6044797", "0.6041759", "0.59926885", "0.5989239", "0.59867716", "0.5947112", "0.5912526", "0.5893085", ...
0.0
-1
Given a query, remove all (and only) object returned by query.
def test_remove(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_by_query(self, query, params = {}):\n params['hitsPerPage'] = 1000\n params['attributesToRetrieve'] = ['objectID']\n\n res = self.search(query, params)\n while (res['nbHits'] != 0):\n object_ids = []\n for elt in res['hits']:\n object_ids....
[ "0.7295163", "0.71565104", "0.6959021", "0.6853164", "0.660744", "0.6603074", "0.6566388", "0.6547425", "0.65339786", "0.65339786", "0.6423338", "0.64070696", "0.63401437", "0.6265542", "0.6230876", "0.61956275", "0.61956275", "0.6149756", "0.6123274", "0.6103659", "0.6098419...
0.0
-1
Given a primary key, remove the referenced object.
def test_remove_one(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeidfobject(self, idfobject):\n key = idfobject.key.upper()\n self.idfobjects[key].remove(idfobject)\n self._reset_dependant_vars(\"idfobjects\")", "def del_object_from_parent(self):\n if self.parent:\n self.parent.objects.pop(self.ref)", "def remove(self, _id):\n...
[ "0.7022169", "0.69352937", "0.67359215", "0.6719173", "0.6582321", "0.65806663", "0.6567136", "0.6532157", "0.6484623", "0.64809275", "0.64600873", "0.64578646", "0.6424982", "0.64086294", "0.639342", "0.63872486", "0.63722575", "0.6369881", "0.63674027", "0.6356457", "0.6356...
0.0
-1
Executes the status change.
def execute(self, agent: Agent, state: SimState) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateStatus(self, status):\n pass", "def change_status(self, status, application_id):", "def StatusChanged(self, state, info):\n pass", "def change_status(self):\n if self.status == 'in progress':\n self.status = 'done'\n return self.status\n elif self.s...
[ "0.7463658", "0.73751247", "0.7124807", "0.70087713", "0.69775283", "0.69684094", "0.6961473", "0.6950376", "0.6876861", "0.67328113", "0.6729613", "0.6650536", "0.66410106", "0.65832186", "0.65808666", "0.6556705", "0.6552023", "0.6541808", "0.6536998", "0.6520757", "0.65162...
0.0
-1
Basically the same method as in the DefaultStatusStrategy, but adding the lethality check.
def execute(self, agent: Agent, state: SimState) -> None: if agent.state() is not AgentState.INFECTIVE: return if np.random.random() < state.remove_prob(): if np.random.random() < state.lethality(): agent.set_state(AgentState.DEAD) else: agent.set_state(AgentState.IMMUNE) else: agent.update_sick_days()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_status(self):", "def custom_assess_status_check(self):\n options = self.options\n # can check options.thing to ensure that it makes sense\n # if wrong return 'blocked', \"The driver is badly configured ...\"\n return None, None", "def test_get_status(self) -> None:\n\n ...
[ "0.61206144", "0.58532864", "0.5794486", "0.57871175", "0.5704715", "0.5690589", "0.5610464", "0.55541265", "0.5540758", "0.5519943", "0.54881585", "0.5483977", "0.5483977", "0.54801756", "0.54513115", "0.5443968", "0.5439035", "0.5439035", "0.5421981", "0.54105186", "0.54043...
0.0
-1
Updates the agents 'vaccine' before executing other checks
def execute(self, agent: Agent, state: SimState) -> None: if agent.state() == AgentState.SUSCEPTIBLE and self.days == state.vaccine_time() \ and np.random.random() < state.vaccine_share(): agent.set_state(AgentState.IMMUNE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReflexVacuumAgent():\n def program(percept):\n location, status = percept\n if status == 'Dirty':\n return 'Suck'\n elif location == loc_A:\n return 'Right'\n elif location == loc_B:\n return 'Left'\n return Agent(program)", "def update_agent...
[ "0.6244821", "0.61558896", "0.6037154", "0.57862145", "0.56732225", "0.56598973", "0.5629421", "0.56235677", "0.5466714", "0.54048747", "0.53794134", "0.5335644", "0.53047174", "0.52038425", "0.5185621", "0.5175854", "0.51662546", "0.5162844", "0.5150552", "0.51480347", "0.51...
0.562963
6
Event handler to count up the number of days
def __next_step(self, state) -> None: self.days += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number_days(self):\r\n return 1", "def get_number_days(self):\r\n raise NotImplementedError", "def hindu_day_count(cls, date):\n return date - cls.EPOCH", "def day(self):\n return 0", "def day(self):\n return 0", "def Daysleftverification():\n pass", "def e...
[ "0.7565068", "0.7157228", "0.6552486", "0.6474552", "0.6474552", "0.64385206", "0.63302714", "0.62566036", "0.62210876", "0.61641073", "0.6144609", "0.6134258", "0.6126481", "0.60720074", "0.60308474", "0.59748036", "0.59373456", "0.59308636", "0.59216416", "0.5906378", "0.58...
0.6448241
5
Isolate (Remove from Grid) a given share of infected people for the sicknessduration. Afterwards they need to be added again to the Grid as removed/dead/immune.
def execute(self, agent: Agent, state: SimState) -> None: if agent.is_quarantined(): if agent.state() is AgentState.DEAD or agent.state() is AgentState.IMMUNE or agent.state() is AgentState.REMOVED: grid = agent.grid() for row in range(grid.get_size()): for col in range(grid.get_size()): grid_pos = GridPos(np.uint(row), np.uint(col)) if not grid.is_occupied(grid_pos): grid.set_agent(agent, grid_pos) agent.set_pos(grid_pos) agent.set_quarantined(False) agent.grid().get_quarantinedAgents().remove(agent) state.add_to_quarantined_count(-1) return else: isolate_share = state.quarantine_share() # Share of infected cells to isolate infected = state.infected_count() if agent.state() == AgentState.INFECTIVE and state.get_quarantined_count() < isolate_share * ( infected + state.get_quarantined_count()): agent.set_quarantined(True) agent.grid().get_quarantinedAgents().append(agent) agent.grid().set_agent(None, agent.get_pos()) agent.get_scheduler().update_gui_state(agent.get_pos(), AgentState.EMPTY) state.add_to_quarantined_count(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prune_losers(self):\n self.log.debug(\"PRUNE LOSERS\")\n # check to see if people i followed follow me back\n cutoff_time = (datetime.now()\n - timedelta(hours=self.reciprocation_window))\n ingrates = Target.objects.filter(\n hunter=self.user, status...
[ "0.5290711", "0.5255218", "0.5245789", "0.51121676", "0.50786626", "0.50382346", "0.50287575", "0.49950922", "0.4993932", "0.49557725", "0.4935115", "0.49344", "0.4929344", "0.4913823", "0.4904666", "0.49032527", "0.4873626", "0.487273", "0.48126963", "0.48005226", "0.4798922...
0.0
-1
Return the parsed contents of the config file.
def get_config(): return json.loads(CONFIG_FILE.read_text())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_config(self):\n config = configparser.ConfigParser()\n config.read(self.configfile)\n return config", "def get(self):\n config = self.user_file.parseString(self.content)\n return config", "def get(self):\n if self.file:\n self._read()\n confi...
[ "0.77496666", "0.7607564", "0.75940424", "0.75590175", "0.7350562", "0.7329519", "0.7322378", "0.73064345", "0.7271919", "0.72459453", "0.71848226", "0.7173341", "0.71679926", "0.7166633", "0.71516", "0.7046862", "0.7030489", "0.70298284", "0.6951747", "0.693856", "0.6933751"...
0.76075804
1
Return the Path of the cache file for the key.
def cache_file(cache_key): return MASTOOLS_DIR / f"{cache_key}_cache.json"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cache_file_path(self) -> str:\n return self.cache_file_path", "def file_path(self, key=None):\n if key is not None:\n return os.path.join(self.directory, self.file_name(key))\n return getattr(\n self.agent,\n constants.CONST_LOCK_FILE,\n os...
[ "0.81592643", "0.7996879", "0.7945717", "0.7945717", "0.7943165", "0.7943165", "0.7821009", "0.7612195", "0.7606929", "0.74966335", "0.74500173", "0.7417488", "0.73771125", "0.7359973", "0.73446333", "0.7326632", "0.7269066", "0.7237881", "0.7232423", "0.71653837", "0.7153147...
0.7776
7
Return the contents of the cache for the key, if its version is correct.
def load_cache(cache_key, version): # Try to get the results of the last run, but fall back to an empty dict if that's not # available. That's most likely to happen on the first run. try: cache = json.loads(cache_file(cache_key).read_text()) except FileNotFoundError: return {} if cache["version"] != version: raise ValueError( f"Unknown {cache_key} version number: expected {version}, got {cache['version']}" ) return cache[cache_key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cache(self, key):\n return self.r.get(key)", "def get(self, key):\n return self.cache_data.get(key)", "def get(self, key):\n # Initialize key variables\n result = self.cache.get(key)\n\n # Return\n return result", "def cache_get(self, key: str) -> Optional[by...
[ "0.7357667", "0.717419", "0.7155493", "0.7136998", "0.6948229", "0.69267094", "0.68619025", "0.68619025", "0.68404466", "0.68349254", "0.68165344", "0.6543805", "0.65131605", "0.64544433", "0.638806", "0.63734585", "0.63579035", "0.63579035", "0.6357669", "0.63486564", "0.634...
0.69202965
6
Write the data to the cache for the key.
def save_cache(cache_key, version, data): # Save these results for the next run. Include the version information and nest the user # information inside a "users" key from the start, because experience says if we don't do this # then the next release will add a feature that requires a change in the data layout, and then # we'll have to write a data migration or something. cache_data = {cache_key: data, "version": version} cache_file(cache_key).write_text(json.dumps(cache_data, indent=2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _put(self, key, data):\n path = self._get_key_path(key)\n with open(path, \"wb\") as pickle_file:\n pickle.dump(data, pickle_file)", "def update(self, cache_key):\r\n self._write_sha(cache_key)", "def write_data_cache(self, data):\n assert data, 'Must input a non-empty di...
[ "0.75162756", "0.7494944", "0.73132676", "0.72090346", "0.7136881", "0.7136881", "0.7108822", "0.7058452", "0.7058452", "0.70342314", "0.7015688", "0.7011552", "0.6970053", "0.69598436", "0.6954975", "0.6933723", "0.6902636", "0.6839885", "0.6836937", "0.68292135", "0.6814033...
0.682781
20
seed users. by defualt set to 5 users
def seed_User(number=5, overwrite=False): if overwrite: print('Overwriting all users') User.objects.all().delete() count = 0 for i in range(number): username = fake.first_name() User.objects.create_user( email=username + "@blogmail.com", password="vns12345", name=username, date_joined=datetime.datetime.now(), is_active=1, is_superadmin=0, avatar='', is_staff=1 ) count += 1 percent_complete = count / number * 100 print( "Adding {} new Users: {:.2f}%".format( number, percent_complete), end='\r', flush=True ) print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_users(count=10):\n for i in range(count):\n user = generate_random_user()\n db.session.add(user)\n db.session.commit()", "def setUp(self):\n self.users = [UserFactory.create() for i in range(5)]", "def create_users(N):\n for _ in range(N):\n name = fake.name()\...
[ "0.76051784", "0.73858875", "0.7105484", "0.7039456", "0.7026748", "0.70025635", "0.69474685", "0.69027597", "0.68793637", "0.6835339", "0.6813174", "0.6786388", "0.6739348", "0.6732933", "0.6717712", "0.66460043", "0.66320664", "0.6586459", "0.6585731", "0.65754217", "0.6552...
0.7705716
0
__init__ initializes an instance of the BlackBoxGame class
def __init__(self, atoms): self._board = Board.Board(atoms) self._score = 25 self._atoms = self._board.get_atoms() self._guesses = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self._game_state = \"UNFINISHED\"\n self._current_player = \"BLACK\"\n self._game_board = Board()", "def __init__(self):\r\n self._board = None\r\n self._bb_settings = Settings()\r\n self._screen = pygame.display.set_mode((self._bb_settings.screen_w...
[ "0.7896587", "0.7875345", "0.7770364", "0.7609544", "0.7590157", "0.75064385", "0.7486412", "0.73641217", "0.7349547", "0.7331177", "0.72932756", "0.72914916", "0.72420853", "0.72406125", "0.7235098", "0.72264135", "0.72215843", "0.7206377", "0.7200916", "0.71846515", "0.7148...
0.0
-1
get_board returns copy of the game's _board object
def get_board(self): return self._board
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_board(self):\n return self.board.copy()", "def get_board(self):\n return copy.deepcopy(self.board)", "def get_game_board(self):\n return self.board", "def get_board(self):\r\n return self.board", "def get_board(self):\n return self.board", "def get_board(self):\...
[ "0.85879415", "0.8494425", "0.8370379", "0.8286644", "0.8171239", "0.81531906", "0.8135133", "0.80969197", "0.7896076", "0.784677", "0.76426995", "0.7624993", "0.7480111", "0.7113091", "0.6975937", "0.68170214", "0.68170214", "0.68170214", "0.68170214", "0.68170214", "0.67700...
0.8100259
8
get_score returns the current score
def get_score(self): return self._score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def getScore(data):\n return score", "def get_score(self):\n return self.__score", "def get_s...
[ "0.8582769", "0.8516198", "0.8516198", "0.8516198", "0.8295523", "0.82583547", "0.8242513", "0.816506", "0.805192", "0.80405056", "0.8029793", "0.8004031", "0.7996733", "0.783279", "0.77552474", "0.7745425", "0.76711756", "0.7645722", "0.7591186", "0.7586955", "0.753339", "...
0.8245853
8
set_score increments the score by change can be negative
def set_score(self, change): self._score = self._score + change
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_score(self,score):\n self._score = score", "def set_score(self, score):\n self._score = score", "def update_score():\n pass", "def set_score(self, a, b, score):\n ### FILL IN ###", "def set_score(self, score):\n # Update the score display\n self.score = sco...
[ "0.82878745", "0.8127291", "0.81113905", "0.8077499", "0.7966757", "0.796316", "0.796316", "0.796316", "0.79581094", "0.78421885", "0.7841102", "0.7821862", "0.78135276", "0.77125627", "0.7695137", "0.76055825", "0.75447255", "0.75010866", "0.7482682", "0.7436947", "0.7427076...
0.830666
0
move_ray this is the primary function which is responsible for recursively moving a ray. Although it primarily look after the action of the Ray.Ray class it lives in the Game instance itself. THIS IS HOW WE DETERMINE THE EXIT POINT OF ALL RAYS HORIZONTAL, VERTICAL, OR WITH DETOURS
def move_ray(self, ray): # look to the next spot in the ray's trajectory next_coordinates = ray.get_next_location() next_location = self._board.get_board_square(next_coordinates) # check for a collisition - return if it occurs if ray.check_for_collision(next_location): return # if we didn't collide as we moved we need to look to check our # diagonals for atoms ccw_diag_coordinates, cw_diag_coordinates = ray.get_diagonals() ccw_diagonal = self._board.get_board_square(ccw_diag_coordinates) cw_diagonal = self._board.get_board_square(cw_diag_coordinates) if ccw_diagonal.is_atom() or cw_diagonal.is_atom(): # If we're on our first move and the immediately diagonals contain an atom we have a reflection if ray.get_current_location() == ray.get_origin_location(): terminal_square = self._board.get_board_square( ray.get_current_location()) # let's the ray know it's finished and the square that it's an endpoint # self.end_ray(ray, terminal_square) return ray.record_edge_collision(terminal_square) # otherwise they cause a bend in the path else: # we have to calculate our trajectory based on the pull # of the atoms in our path ray.recalculate_trajectory(ccw_diagonal, cw_diagonal) # get the coordinates of the next location in our new trajectory next_coordinates = ray.get_next_location() # determine the next coordinate will result in a collision - return if it would if ray.check_for_collision( self._board.get_board_square(next_coordinates)): return # move the ray to the next step forward in its current trajectory ray.set_current_location(next_coordinates) # finally, recursively call our current function from the next step in its path. self.move_ray(ray)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shoot_ray(self, row, col):\n # Uses validate method to check if row,col are legal for ray entrance location\n if not self.valid_ray(row, col):\n return False\n # creates ray object from row, col integers\n ray = Ray(row, col)\n # checks if atom is in front of entra...
[ "0.6431586", "0.6042099", "0.5938072", "0.5936508", "0.592946", "0.58522797", "0.56683385", "0.5636461", "0.5598364", "0.55645293", "0.55484056", "0.5496541", "0.54507935", "0.543731", "0.5407489", "0.5379115", "0.53595096", "0.53525144", "0.534523", "0.5339988", "0.5306413",...
0.71545416
0
shoot_ray shoots a ray from a given row and column if possible
def shoot_ray(self, origin_row, origin_column): # get the the square object at row x column origin = self._board.get_board_square((origin_row, origin_column)) # check that it is a valid "edge" to send a ray from origin_check = origin.is_edge() # if it's not then return false if origin_check == False: return False # if we pass the origin check create shoot a new Ray.Ray object from row x column new_ray = Ray.Ray(origin_row, origin_column) # let the square we shot from know its an orign square origin.set_originating_ray(new_ray) # Deduct 1 from the score since we now have on exit point self.set_score(-1) # while the ray object has a direction (will be set to none when it reaches an endpoint) # send it to the helper function that will move it while new_ray.get_direction() != None: self.move_ray(new_ray) # if we hit an exit point (other than through reflection) deduct the point for that terminus = new_ray.get_terminal_location() # check the the terminal point is an edge (hitting an atom returns none as terminus) if terminus != None: # check that the terminus is not a reflection, which shouldn't be counted twice terminal_square = self._board.get_board_square(terminus) terminal_square.set_terminating_ray(new_ray) if terminus != (origin_row, origin_column): self.set_score(-1) return terminus
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shoot_ray(self, row, col):\n # Uses validate method to check if row,col are legal for ray entrance location\n if not self.valid_ray(row, col):\n return False\n # creates ray object from row, col integers\n ray = Ray(row, col)\n # checks if atom is in front of entra...
[ "0.78674567", "0.6780156", "0.67677057", "0.61004895", "0.5816195", "0.5722863", "0.5721122", "0.56699514", "0.566448", "0.5559462", "0.5555234", "0.55531144", "0.55380315", "0.55212104", "0.55208117", "0.5480533", "0.54755425", "0.5391073", "0.53854626", "0.5371789", "0.5366...
0.748143
1
guess_atoms a function allowing a player to input guesses as to the locations of atoms on the board
def guess_atom(self, row, column): if [row, column] in self._atoms: # if an tom was properly guessed remove it from the atom's array # and return True, append the guess to the guesses array, and #remove it from the available atoms to guess from. self._guesses.append([row, column]) self._atoms.remove([row, column]) return True # otherwise deduct five points and return false self.set_score(-5) # add the guess to the guesses array self._guesses.append([row, column]) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def guess_atom(self, row, col):\n if not self.valid_guess(row, col):\n return False\n # if row,col in guess list, tells players and returns True\n if self._board[row][col] != \" \":\n print(\"You've already guessed that location!\")\n return True\n # if ...
[ "0.71533525", "0.67306685", "0.5900517", "0.58870995", "0.5859277", "0.5668452", "0.54721373", "0.5391912", "0.5355076", "0.5267006", "0.5263563", "0.5250836", "0.5222762", "0.52184594", "0.5217904", "0.51913744", "0.5180138", "0.51650697", "0.51289845", "0.5128813", "0.51253...
0.6491031
2
atoms_left returns the number of unguessed atoms still left
def atoms_left(self): return len(self._atoms)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def atoms_left(self):\n return self._atoms", "def atoms_left(self):\r\n return self._board.get_atoms()", "def num_pieces_left(self):\n return self.num_white_pieces + self.num_black_pieces", "def count_mass_left(self):\n self.mass_left_count = int(np.sum(self.array))", "def numbe...
[ "0.7521378", "0.74469197", "0.6469958", "0.6355561", "0.6071514", "0.6057757", "0.6004638", "0.59833574", "0.59616107", "0.5921324", "0.5888561", "0.5864343", "0.5864343", "0.5818085", "0.57945406", "0.57681686", "0.5768063", "0.5694106", "0.56850934", "0.56428653", "0.559667...
0.84319353
0
Test LSTM, LayerNormLSTM and NAS gnmt encoder. GNMT has only a single bi directional layer, and num_layers1 uni layers. time_major=True
def runLSTMEncoder(self, encoder, num_layers): inputs_ph = tf.placeholder( dtype=tf.float32, shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH)) inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None)) outputs, states = encoder.encode( mode=tf.estimator.ModeKeys.TRAIN, sequence_inputs=inputs_ph, sequence_length=inputs_length_ph) num_bi_layers = 1 num_uni_layers = num_layers - num_bi_layers if num_uni_layers == 1: # states is a tuple of (states_bi_bw, states_uni) # states_bi_bw is a tuple (states_bi_bw) # states_uni is a tuple of length num_uni_layers states_bi_bw, states_uni = states self.assertEqual(1, len(states_bi_bw)) self.assertEqual(num_uni_layers, len(states_uni)) # states_bi_bw[0] is a tuple of (states_c, states_h) self.assertEqual(2, len(states_bi_bw[0])) # convert states from tuple to tensor states_list = [states_bi_bw[0]] for i in range(num_uni_layers): states_list.append(states_uni[i]) states = tf.convert_to_tensor(states_list) else: # states is a tuple of (states_uni) of length num_uni_layers states_uni = states self.assertEqual(num_uni_layers, len(states_uni)) states_list = [] for i in range(num_uni_layers): states_list.append(states_uni[i]) states = tf.convert_to_tensor(states_list) inputs, inputs_length = common_utils.get_encoder_test_inputs() with self.test_session() as sess: sess.run(tf.global_variables_initializer()) outputs, states = sess.run( [outputs, states], feed_dict={ inputs_ph: inputs, inputs_length_ph: inputs_length }) self.assertAllEqual( [common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH], outputs.shape) if num_uni_layers == 1: self.assertEqual(num_layers, len(states)) # 2 in second dimension means states_c and states_h self.assertAllEqual( [num_layers, 2, common_utils.BATCH_SIZE, common_utils.DEPTH], states.shape) else: self.assertEqual(num_uni_layers, len(states)) self.assertAllEqual( [num_uni_layers, 2, common_utils.BATCH_SIZE, common_utils.DEPTH], states.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_model_with_lstm_layer_time_major_true(self):\n\n tf.compat.v1.reset_default_graph()\n sess = tf.compat.v1.Session()\n with sess.graph.as_default():\n inputs = tf.keras.Input(shape=(3, 100))\n\n # Defaults\n # return_state=False, unit_forget_bias=True\n...
[ "0.7174004", "0.6981176", "0.6942284", "0.66725725", "0.65949506", "0.6501579", "0.6480307", "0.6405576", "0.6309258", "0.6258608", "0.62536645", "0.6212986", "0.6177987", "0.61688703", "0.6160826", "0.6120184", "0.6105675", "0.6078574", "0.6066392", "0.60473734", "0.60453326...
0.6794371
3
Test GRU gnmt encoder. time_major=True
def runGRUEncoder(self, encoder, num_layers): inputs_ph = tf.placeholder( dtype=tf.float32, shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH)) inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None)) outputs, states = encoder.encode( mode=tf.estimator.ModeKeys.TRAIN, sequence_inputs=inputs_ph, sequence_length=inputs_length_ph) num_bi_layers = 1 num_uni_layers = num_layers - num_bi_layers if num_uni_layers == 1: states_bi_bw, states_uni = states # states_bi_bw = (states_bi_bw,) self.assertEqual(1, len(states_bi_bw)) self.assertEqual(num_uni_layers, len(states_uni)) # unlike lstm, whose states is a tuple of (c,h), # gru states has only one element # states_bi_bw[0] is a states tensor states_list = [states_bi_bw[0]] for i in range(num_uni_layers): states_list.append(states_uni[i]) states = tf.convert_to_tensor(states_list) else: states_uni = states self.assertEqual(num_uni_layers, len(states_uni)) states_list = [] for i in range(num_uni_layers): states_list.append(states_uni[i]) states = tf.convert_to_tensor(states_list) inputs, inputs_length = common_utils.get_encoder_test_inputs() with self.test_session() as sess: sess.run(tf.global_variables_initializer()) outputs, states = sess.run( [outputs, states], feed_dict={ inputs_ph: inputs, inputs_length_ph: inputs_length }) self.assertAllEqual( [common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH], outputs.shape) if num_uni_layers == 1: self.assertEqual(num_layers, len(states)) self.assertAllEqual( [num_layers, common_utils.BATCH_SIZE, common_utils.DEPTH], states.shape) else: self.assertEqual(num_uni_layers, len(states)) self.assertAllEqual( [num_uni_layers, common_utils.BATCH_SIZE, common_utils.DEPTH], states.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_golay_module1(self):\r\n sent = golay.encode([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0])\r\n rec = sent[:-1] + 'C' # possible error here\r\n decoded, errors = golay.decode(rec)\r\n self.assertEqual(decoded, sent)\r\n self.assertLess(errors, 1.5)\r\n rec = sent[:-1] + '...
[ "0.53550947", "0.5301906", "0.5206506", "0.49940667", "0.49887952", "0.49855092", "0.49552405", "0.48806253", "0.4856835", "0.48504332", "0.48328564", "0.48092747", "0.48080942", "0.477451", "0.475015", "0.47448063", "0.47433698", "0.47394577", "0.47322595", "0.47050416", "0....
0.5863373
0
Create a CourseGraph, fetching unitary weights and edge weights from database, creating CourseNodes for each course, and
def __init__(self, database, session, max_suggestions=5, max_courses=30, cache_mult=4): self._nodes = dict() # dict with courseid keys, CourseNode vals self._max_suggestions = max_suggestions self._max_courses = max_courses self._cache_mult = cache_mult db = database # Get dict mapping courses to unitary weights unitary_dict = db.get_unitary_dict(session) # Get dict mapping courses to adjacent courses and weights edge_dict = db.get_edges_dict(session) # Create CourseNodes for courseid in unitary_dict: courseNode = CourseGraph.CourseNode(courseid=courseid, edges=dict(), popularity=unitary_dict[courseid]) self._nodes[courseid] = courseNode # Create course edge dict for each CourseNode for courseid in edge_dict: node = self._nodes[courseid] # get node of interest adj_courses = edge_dict[courseid] # get inner dict {otherid: edge_weight} for otherid in adj_courses: other_node = self._nodes[otherid] node.addEdge(other_node, adj_courses[otherid])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to...
[ "0.610946", "0.6068693", "0.5970327", "0.59139115", "0.5910554", "0.59065074", "0.5842171", "0.58402723", "0.5831642", "0.5821705", "0.58180076", "0.5813589", "0.5796841", "0.57905227", "0.5715409", "0.5708784", "0.56940794", "0.5655929", "0.5635845", "0.5629975", "0.56149757...
0.7083467
0
Gets the crosslistings of the top edges from a course
def getTopEdgesFrom(self, session, courseid): node = self.getNode(courseid) # get CourseNode if not node: return [] edges = node.getEdges() # get its Edge dict return sorted(edges.keys(), key=lambda k: edges[k], reverse=True)[:5]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assembly_courses(wall):\n courses = []\n vertices = set(wall.nodes())\n base = set(wall.nodes_where({'is_support': True}))\n\n if base:\n courses.append(list(base))\n\n seen = set()\n seen.update(base)\n\n vertices -= base\n\n while vertices:\n nbrs = s...
[ "0.59826374", "0.55603427", "0.5523582", "0.5495732", "0.5461872", "0.54479295", "0.53818995", "0.5318768", "0.52677137", "0.5246417", "0.520737", "0.5162896", "0.5158995", "0.51440036", "0.51215637", "0.5089466", "0.50795156", "0.50770104", "0.5055389", "0.5054957", "0.50231...
0.72403175
0
Return filename of a submission downloaded from synapse.
def downloadSubmissionAndFilename(self, sub, downloadFile=True, **kargs): if isinstance(sub, dict) == False: raise TypeError("input must be a submission (dictionary)") if downloadFile == False: filename = self.getSubmission(sub, downloadFile=False)['filePath'] else: filename = self.getSubmission(sub, downloadFile=True, **kargs)['filePath'] return filename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_filename(self) -> str:\n return self._download_filename", "def get_download_file_name(self):\n # Use 'unknown' if the course instance does not have a term\n if self.course_instance.term:\n term = self.course_instance.term.get_url_name()\n else:\n ter...
[ "0.7089564", "0.70576966", "0.69732934", "0.69194645", "0.68737155", "0.68652344", "0.6865232", "0.68460745", "0.6798308", "0.6795707", "0.66493416", "0.66368407", "0.6624542", "0.6617163", "0.66041523", "0.6603908", "0.65955335", "0.65814525", "0.6547018", "0.6510974", "0.64...
0.6980007
2
Transform relevant object into json object
def json(self, data): import json data = json.dumps(data) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def object_to_json(self, json_data):\n result = {}\n if sys.version_info[0] < 3:\n itr = json_data.__dict__.iteritems()\n else:\n itr = json_data.__dict__.items()\n for key,value in itr:\n # Skip internal attributes.\n if key.startswith(\"__\"...
[ "0.69116586", "0.6882489", "0.68524873", "0.6822507", "0.6781035", "0.66899097", "0.65613455", "0.65372014", "0.65137166", "0.647605", "0.6443874", "0.6437841", "0.64342636", "0.6407022", "0.63898367", "0.6371773", "0.6358821", "0.63180697", "0.6306601", "0.6302604", "0.63005...
0.0
-1
r"""Factorized Prior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
def bmshj2018_factorized( quality, metric="mse", pretrained=False, progress=True, **kwargs ): if metric not in ("mse", "ms-ssim"): raise ValueError(f'Invalid metric "{metric}"') if quality < 1 or quality > 8: raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)') return _load_model( "bmshj2018-factorized", metric, quality, pretrained, progress, **kwargs )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bias_prior(self):", "def lnprior(self):\n \n return", "def prior_predictive(self):\n cfg = self.config\n n = cfg['batch_size'] * cfg['q/n_samples']\n n_samples = cfg['q/n_samples']\n with util.get_or_create_scope('model', reuse=True):\n h_prior = tf.cast(self.p_h_L.sample(n),...
[ "0.6737038", "0.636095", "0.607639", "0.60483646", "0.60367125", "0.60130304", "0.59627426", "0.59598047", "0.59528166", "0.5932962", "0.5882837", "0.58736926", "0.5853717", "0.5850431", "0.5840175", "0.5834046", "0.57900655", "0.57882077", "0.5787185", "0.57477564", "0.57408...
0.0
-1
r"""Scale Hyperprior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
def bmshj2018_hyperprior( quality, metric="mse", pretrained=False, progress=True, **kwargs ): if metric not in ("mse", "ms-ssim"): raise ValueError(f'Invalid metric "{metric}"') if quality < 1 or quality > 8: raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)') return _load_model( "bmshj2018-hyperprior", metric, quality, pretrained, progress, **kwargs )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale(self):", "def bias_prior(self):", "def set_hyper_parameters(self, x):\n self.set_scale(x[0])", "def setPrior(self,xPrior,priorWeight):\n assert self.regularizationLambda == 0\n if not isinstance(xPrior,np.ndarray):\n xPrior = np.array(xPrior)\n self.count = 1\...
[ "0.62483156", "0.62137103", "0.6170398", "0.6155527", "0.6027734", "0.59788775", "0.5940418", "0.5933784", "0.5926682", "0.59190893", "0.5883833", "0.5880828", "0.58511233", "0.58176", "0.580629", "0.57443565", "0.57379764", "0.56918335", "0.56906885", "0.5651347", "0.5624571...
0.5472967
34
r"""Scale Hyperprior with non zeromean Gaussian conditionals from D.
def mbt2018_mean(quality, metric="mse", pretrained=False, progress=True, **kwargs): if metric not in ("mse", "ms-ssim"): raise ValueError(f'Invalid metric "{metric}"') if quality < 1 or quality > 8: raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)') return _load_model("mbt2018-mean", metric, quality, pretrained, progress, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def _likelihood_der1_scale(self, hyperparam):\n\n # Get eta\n eta = self._hyperparam_to_eta(hyperparam)\n\n # Set scale of the covariance object\n scale = self._hyperparam_to_scale(hyperparam[s...
[ "0.69035596", "0.60564435", "0.5897708", "0.5843555", "0.5838015", "0.5830661", "0.5816271", "0.5786775", "0.5709402", "0.56590503", "0.56590503", "0.56466454", "0.5611644", "0.5611628", "0.5609158", "0.5600361", "0.55973405", "0.55847114", "0.55843025", "0.5575209", "0.55476...
0.0
-1
r"""Joint Autoregressive Hierarchical Priors model from D.
def mbt2018(quality, metric="mse", pretrained=False, progress=True, **kwargs): if metric not in ("mse", "ms-ssim"): raise ValueError(f'Invalid metric "{metric}"') if quality < 1 or quality > 8: raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)') return _load_model("mbt2018", metric, quality, pretrained, progress, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, D, H, S, to_learn=[\"W\", \"pi\", \"sigma\"], comm=MPI.COMM_WORLD):\n self.comm = comm\n self.noise_policy = {}\n self.to_learn = to_learn\n self.D = D\n self.H = H\n self.S = S\n\n tol = 1e-5\n self.noise_policy = {\n \"W\": (-n...
[ "0.55733985", "0.5363515", "0.53536046", "0.53175926", "0.526763", "0.5229534", "0.52215683", "0.51651067", "0.51620126", "0.5120631", "0.5106703", "0.50803024", "0.5077998", "0.50690013", "0.5067703", "0.50612646", "0.5049136", "0.50420725", "0.5038924", "0.50167084", "0.501...
0.0
-1
r"""Anchor model variant from `"Learned Image Compression with Discretized Gaussian Mixture Likelihoods and Attention Modules"
def cheng2020_anchor(quality, metric="mse", pretrained=False, progress=True, **kwargs):
    """Load the ``cheng2020-anchor`` model at the requested quality level.

    Args:
        quality: integer quality level, must lie in 1..6.
        metric: training metric, either ``"mse"`` or ``"ms-ssim"``.
        pretrained: whether to download pretrained weights.
        progress: whether to display a download progress bar.

    Raises:
        ValueError: if ``metric`` or ``quality`` is out of range.
    """
    if metric not in ("mse", "ms-ssim"):
        raise ValueError(f'Invalid metric "{metric}"')
    if not 1 <= quality <= 6:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
    return _load_model(
        "cheng2020-anchor", metric, quality, pretrained, progress, **kwargs
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, sl...
[ "0.6148643", "0.5812919", "0.5794156", "0.54624885", "0.53823245", "0.5287905", "0.52859586", "0.5282076", "0.52552813", "0.5201943", "0.51982266", "0.5182037", "0.51819533", "0.515083", "0.5144859", "0.5127486", "0.5112405", "0.509996", "0.5075668", "0.50655454", "0.5056296"...
0.5076744
18
r"""Self-attention model variant from `"Learned Image Compression with Discretized Gaussian Mixture Likelihoods and Attention Modules" <https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru Takeuchi, Jiro Katto (CVPR 2020).
def cheng2020_attn(quality, metric="mse", pretrained=False, progress=True, **kwargs):
    """Load the ``cheng2020-attn`` (self-attention) model at the requested quality.

    Args:
        quality: integer quality level, must lie in 1..6.
        metric: training metric, either ``"mse"`` or ``"ms-ssim"``.
        pretrained: whether to download pretrained weights.
        progress: whether to display a download progress bar.

    Raises:
        ValueError: if ``metric`` or ``quality`` is out of range.
    """
    if metric not in ("mse", "ms-ssim"):
        raise ValueError(f'Invalid metric "{metric}"')
    if not 1 <= quality <= 6:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
    return _load_model(
        "cheng2020-attn", metric, quality, pretrained, progress, **kwargs
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def self_attention_model(self):\n inputs = self.prepare_inputs()\n \n features = self.prepare_features(inputs)\n support_history_context_concat_emb = features['support_history_context_concat_emb']\n support_link_info_concat_emb = features['support_link_info_concat_emb']\n ...
[ "0.65408075", "0.64289623", "0.6141969", "0.6130033", "0.61221105", "0.58673143", "0.586286", "0.5794737", "0.575147", "0.573981", "0.5708623", "0.5706052", "0.57047206", "0.56855595", "0.5660355", "0.5646968", "0.55968773", "0.5591234", "0.55796915", "0.5566003", "0.55493313...
0.0
-1
Entry point to gameplay.
def main() -> None:
    """Entry point to gameplay: build the game, choose a GUI for TASK, run it."""
    game = advanced_game(MAP_FILE)

    root = tk.Tk()
    root.title('EndOfDayz')
    # Select the GUI implementation from the module-level TASK constant.
    # NOTE(review): for any TASK other than 1 or 2, `gui` is never bound and the
    # call below raises NameError — confirm TASK is always 1 or 2.
    if TASK == 1:
        gui = BasicGraphicalInterface
    elif TASK == 2:
        gui = ImageGraphicalInterface
    # else:
    #     gui = MastersGraphicalInterface
    app = gui(root, game.get_grid().get_size())
    app.play(game)
    root.mainloop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play_game():\n pass", "def play_game():\n pass", "def game_play(self):", "def play(self):\n print('Playing game...')", "def start_game(self):\n\n\t\tpass", "def main():\n g = Game(800, 600)\n g.start()", "def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.m...
[ "0.8527575", "0.8128602", "0.8080839", "0.79423046", "0.79204637", "0.7907992", "0.78166646", "0.77512056", "0.7606996", "0.7596167", "0.75618863", "0.75584793", "0.7554812", "0.75141007", "0.7511485", "0.7502417", "0.7497473", "0.7493972", "0.74235296", "0.7420933", "0.73897...
0.6931555
42
Retrieve, update or delete a code snippet.
def student_detail(request, pk):
    """Retrieve, update or delete a single student record.

    GET returns the serialized student, PUT validates and applies an update,
    DELETE removes the record. Responds 404 when the pk does not exist.
    """
    try:
        students = student.objects.get(pk=pk)
    except student.DoesNotExist:
        # BUG FIX: the except clause previously referenced the local name
        # `students`, which is unbound exactly when the lookup fails — that
        # produced a NameError instead of the intended 404. Catch the model
        # class's DoesNotExist instead.
        return HttpResponse(status=404)

    if request.method == 'GET':
        serializer = studentSerializer(students)
        return JsonResponse(serializer.data)
    elif request.method == 'PUT':
        data = JSONParser().parse(request)
        serializer = studentSerializer(students, data=data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data)
        return JsonResponse(serializer.errors, status=400)
    elif request.method == 'DELETE':
        students.delete()
        return HttpResponse(status=204)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def snippet_detail(request, pk):\n try:\n snippet = Snippet.objects.get(pk=pk)\n except Snippet.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = SnippetSerializer(snippet)\n return Response(serializer.data)\n\n ...
[ "0.6433584", "0.6433584", "0.6343745", "0.62817955", "0.62216586", "0.6008872", "0.59082586", "0.5902986", "0.58660686", "0.581822", "0.5809916", "0.5748296", "0.57467324", "0.57114553", "0.56778467", "0.55851805", "0.5559712", "0.54972357", "0.54327506", "0.5384471", "0.531"...
0.0
-1
Loads a json value from a file and converts it to the corresponding python object.
def loadJsonValueFromFile(inputFilePath):
    """Deserialize the JSON document stored at ``inputFilePath``.

    Returns the corresponding Python object (dict, list, str, number, ...).
    """
    with open(inputFilePath) as handle:
        return json.load(handle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(cls, file):\n with open(file, \"r\") as f:\n j = json.load(f)\n return cls(**j)", "def load_from_json_file(filename):\n with open(filename, 'r') as f:\n obj = json.loads(f.read())\n return obj", "def convert_json_to_object(file_content):\n object = json...
[ "0.7691063", "0.76900375", "0.76079774", "0.7595927", "0.75809187", "0.7557681", "0.7495783", "0.74881333", "0.74459565", "0.7439162", "0.7416306", "0.7365671", "0.73619384", "0.7338387", "0.73190105", "0.7312332", "0.7281531", "0.7272493", "0.72698015", "0.7256082", "0.72544...
0.74380505
10
Initializes turtle instance for turtle game.
def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):
    """Create and configure a turtle for the turtle game.

    Sets the pen's shape, color and speed, and the screen background color.
    Returns the configured ``turtle.Turtle`` instance.
    """
    pen = turtle.Turtle()
    pen.shape(turtle_shape)
    turtle.bgcolor(bg_color)
    pen.color(turtle_color)
    pen.speed(turtle_speed)
    return pen
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_turtle():\n turtle.up()\n turtle.home()", "def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.comm...
[ "0.81658715", "0.7127116", "0.704404", "0.6922751", "0.6751538", "0.66928184", "0.6515024", "0.64465225", "0.6263604", "0.62292695", "0.6227585", "0.61615515", "0.58694047", "0.58318806", "0.580501", "0.575872", "0.57573175", "0.57200843", "0.5657598", "0.55897486", "0.555192...
0.8317453
0
Defines the turtle movement for the initialized turtle instance and executes that movement.
def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed):
    """Draw 36 squares, rotating 10 degrees between each, with a new turtle.

    The turtle is created via :func:`initialize` with the given appearance
    settings; each square has 200-unit sides.
    """
    pen = initialize(turtle_shape, bg_color, turtle_color, turtle_speed)
    for _ in range(36):
        for _ in range(4):
            pen.forward(200)
            pen.right(90)
        pen.right(10)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_turtle(self):\n self.forward(self.move_speed)", "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def init_turtle():\n turtle.up()\n turtle.home()", "def move():\n Robot.move()", "def movement(self):", "def move(self):\r\n segments = len(self.all_turt...
[ "0.7389288", "0.6786604", "0.6750962", "0.6614437", "0.6599587", "0.64865434", "0.6407805", "0.64050645", "0.6318544", "0.6280543", "0.6203746", "0.61156297", "0.6057648", "0.6023432", "0.60230684", "0.600326", "0.59755194", "0.5961362", "0.59582186", "0.59399325", "0.5905899...
0.69048256
1
Add this command to the main parser.
def add_commands(parser, subparsers):
    """Register the ``libraries`` subcommand on the main argument parser.

    The subcommand searches for LogicBlox libraries and dispatches to
    ``execute_libraries``. Returns the configured subparser.
    """
    subparser = subparsers.add_parser('libraries', help='search for LogicBlox libraries')
    subparser.set_defaults(func=execute_libraries)
    subparser.add_argument('libraries', nargs='*',
                           help="libraries to locate")
    subparser.add_argument('--libpath',
                           help="library path to search")
    subparser.add_argument('--dependencies', '-d', default=False, action='store_true',
                           help="print the libraries upon which a library depends")
    subparser.add_argument('--quiet', '-q', default=False, action='store_true',
                           help="do not display any information. Used when simply querying the exit code")
    # BUG FIX: the function previously ended with a bare `subparser` expression,
    # which is a no-op; return it explicitly so callers can extend the command.
    return subparser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extend_cli(self, subparser):", "def add_args(self, parser):", "def add(self, name, command):", "def _add_to_cli(self, parser, group=None):\n dest = self.dest\n if group is not None:\n dest = group.name + '_' + dest\n\n subparsers = parser.add_subparsers(dest=dest,\n ...
[ "0.728577", "0.7169099", "0.7166533", "0.7153756", "0.7049907", "0.7003788", "0.7001368", "0.69773185", "0.6960154", "0.6897418", "0.68806356", "0.6854973", "0.6824182", "0.6771863", "0.6764204", "0.67025226", "0.6698706", "0.6660946", "0.6652996", "0.6588826", "0.6572274", ...
0.0
-1
Saves summary statistics as a csv file in the current directory and returns the output filename.
def save_summary_statistics_csv(
    experiment_name, roi_summary_data, save_directory_path: str = ""
):
    """Save summary statistics as a timestamped CSV and return the output path.

    Creates ``save_directory_path`` (and parents) if missing, writes
    ``roi_summary_data`` there without the index column, and prints the
    resulting location.
    """
    save_dir = Path(save_directory_path)
    # Create directories on the path if they don't already exist
    save_dir.mkdir(parents=True, exist_ok=True)

    timestamp = iso_datetime_for_filename(datetime.now())
    csv_filepath = save_dir / f"{experiment_name} - summary statistics (generated {timestamp}).csv"

    roi_summary_data.to_csv(csv_filepath, index=False)
    print(f"Summary statistics saved to: {csv_filepath}\n")
    return csv_filepath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkpoint_stats(self, stats):\n stats.to_csv(\n self.params.stat.dir + self.params.model.name + \"_\" + self.params.data.name + \".stat\",\n sep='\\t',index=False,header=True)", "def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base...
[ "0.67156446", "0.6640688", "0.65309376", "0.65059", "0.6463663", "0.6374038", "0.6361525", "0.632994", "0.63163084", "0.62892014", "0.6272734", "0.62425643", "0.62299234", "0.62213546", "0.6216482", "0.6173342", "0.6157177", "0.6122196", "0.61156815", "0.60866743", "0.6001306...
0.78815484
0
Collects the JPEG image paths in the specified experiment directory and returns them as a pandas Series.
def get_raw_image_paths_for_experiment(local_sync_directory_path, experiment_directory):
    """Return the ``.jpeg`` file paths under the experiment directory.

    The paths are returned as a ``pandas.Series`` for downstream processing.
    """
    experiment_dir = os.path.join(local_sync_directory_path, experiment_directory)
    jpeg_paths = get_files_with_extension(experiment_dir, ".jpeg")
    return pd.Series(jpeg_paths)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_sample_images():\n # Try to import imread from scipy. We do this lazily here to prevent\n # this module from depending on PIL.\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n except ImportError:\n...
[ "0.66397595", "0.6540607", "0.64433455", "0.64104563", "0.6374026", "0.63023806", "0.6257343", "0.6191441", "0.61159426", "0.6109508", "0.6003954", "0.5990808", "0.59906566", "0.59873354", "0.59793514", "0.5957272", "0.59243476", "0.59169495", "0.58602643", "0.58406574", "0.5...
0.62735325
6
stack pandas DataFrames logically into a bigger DataFrame, resets the index of the resulting DataFrame to avoid duplicates in the index
def _stack_dataframes(dataframes: List[pd.DataFrame]) -> pd.DataFrame:
    """Stack DataFrames vertically into one, renumbering the index 0..n-1.

    Resetting the index avoids duplicate index labels in the combined frame.
    """
    stacked = pd.concat(dataframes)
    return stacked.reset_index(drop=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reset_index(self):\n\n # reminder on multi index in columns\n df1 = pd.DataFrame([[1, 3], [2, 4], [11, 33], [22, 44]]).T\n df1.index = pd.Series([1, 2], name=\"idx1\")\n df1.columns = pd.MultiIndex.from_product([['a', 'b'], ['aa', 'bb']], names=['idx_c', 'idx2'])\n\n # s...
[ "0.5980725", "0.5887287", "0.5851793", "0.5812862", "0.5757257", "0.57415146", "0.57319605", "0.5689936", "0.56405115", "0.56170523", "0.56154037", "0.5591578", "0.5586357", "0.5564776", "0.55478466", "0.554424", "0.5535979", "0.54988015", "0.5480869", "0.54719794", "0.545409...
0.66242844
0
stack pandas Series logically into a DataFrame
def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame:
    """Stack Series into a DataFrame with one row per input Series.

    The Series are joined side by side (aligning on their labels) and the
    result is transposed so each input becomes a row.
    """
    side_by_side = pd.concat(serieses, axis="columns")
    return side_by_side.T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_structure():\n\n items = [1.0, 2.0, 3.0, 4.0, 5.0 ]\n s = pd.Series(items, index=['a', 'b', 'c', 'd', 'e'])\n # s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])\n print s\n s = pd.Series(items)\n print s\n\n d= {'one': [1.0, 2.0, 3.0, 4.0], 'two': [4.0, 3.0, 2.0, 1.0...
[ "0.6693489", "0.64312357", "0.6289627", "0.61618704", "0.59844136", "0.59790695", "0.5950793", "0.5896647", "0.58869344", "0.58781815", "0.577991", "0.5733921", "0.56654763", "0.566396", "0.56531364", "0.56504464", "0.5612756", "0.56065685", "0.5595002", "0.5558159", "0.55369...
0.76580703
0
Process a full set of images, with parallelization if multiple CPU threads are available on this machine
def _process_images(
    raw_image_paths: pd.Series,
    raw_images_dir: str,
    ROI_definitions: Dict[str, Tuple],
    flat_field_filepath_or_none: Union[str, None],
    save_ROIs: bool,
    save_dark_frame_corrected_images: bool,
    save_flat_field_corrected_images: bool,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Process a full set of images, parallelized across CPU threads.

    Returns a tuple of (ROI summary rows stacked over all files, one
    diagnostics row per image stacked into a DataFrame).
    """

    def _process_image_local(raw_image_path):
        """
        Version of process_image with all of the local configuration variables
        packed in. Also encapsulates the opening of the image.
        """
        return process_image(
            original_rgb_image=raw.open.as_rgb(raw_image_path),
            original_image_filepath=raw_image_path,
            raw_images_dir=raw_images_dir,
            ROI_definitions=ROI_definitions,
            flat_field_filepath_or_none=flat_field_filepath_or_none,
            save_ROIs=save_ROIs,
            save_dark_frame_corrected_image=save_dark_frame_corrected_images,
            save_flat_field_corrected_image=save_flat_field_corrected_images,
        )

    with ThreadPoolExecutor() as executor:
        # We want identical warnings to be shown only for the first image they occur on (the default),
        # but we also want subsequent calls to process_experiment to start with a fresh warning store
        # so that warnings don't stop showing after the first run.
        # catch_warnings gives us this fresh warning store.
        with warnings.catch_warnings():
            # process_image returns roi_summary_data df, image_diagnostics df -> this will be a list of 2-tuples
            roi_summary_data_and_image_diagnostics_dfs_for_files = list(
                tqdm(
                    executor.map(_process_image_local, raw_image_paths),
                    total=len(raw_image_paths),
                )
            )

    # Split the list of (roi_summary, diagnostics) pairs into two parallel tuples.
    roi_summary_data_for_files, image_diagnostics_for_files = zip(
        *roi_summary_data_and_image_diagnostics_dfs_for_files
    )

    roi_summary_data_for_all_files = _stack_dataframes(roi_summary_data_for_files)
    image_diagnostics_for_all_files = _stack_serieses(image_diagnostics_for_files)

    return roi_summary_data_for_all_files, image_diagnostics_for_all_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scan_images_multiprocessed(images, clf, processes, vstep=15, hstep=15, dnum=5):\n pool = Pool(processes=processes) # start 4 worker processes\n results = []\n for i in range(0, processes):\n begin = i * int(len(images) / processes)\n if i == processes - 1:\n end = len(images)...
[ "0.7019778", "0.701473", "0.68229294", "0.6802597", "0.67542833", "0.6570712", "0.6545006", "0.65141946", "0.6469355", "0.6455748", "0.6437736", "0.6373732", "0.6337872", "0.6320698", "0.6302606", "0.6255835", "0.6229472", "0.62148833", "0.6202636", "0.61866295", "0.61764526"...
0.56445336
94
Version of process_image with all of the local configuration variables packed in. Also encapsulates the opening of the image.
def _process_image_local(raw_image_path):
    """
    Version of process_image with all of the local configuration variables
    packed in. Also encapsulates the opening of the image.
    """
    # NOTE(review): this closure relies on raw_images_dir, ROI_definitions,
    # flat_field_filepath_or_none and the save_* flags being bound in the
    # enclosing scope — it is only meaningful nested inside _process_images.
    return process_image(
        original_rgb_image=raw.open.as_rgb(raw_image_path),
        original_image_filepath=raw_image_path,
        raw_images_dir=raw_images_dir,
        ROI_definitions=ROI_definitions,
        flat_field_filepath_or_none=flat_field_filepath_or_none,
        save_ROIs=save_ROIs,
        save_dark_frame_corrected_image=save_dark_frame_corrected_images,
        save_flat_field_corrected_image=save_flat_field_corrected_images,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(image):\n pass", "def process_image(self):\n pass", "def process(self, image):", "def process(self, image: np.ndarray) -> NamedTuple:\n\n return super().process(input_data={'image': image})", "def add_processed_image(image_proc_type, name, b64_string, export_file_type):\n\n if i...
[ "0.6546748", "0.6495667", "0.6287394", "0.57940394", "0.57754576", "0.57309806", "0.5728724", "0.5704282", "0.5697228", "0.565087", "0.56456286", "0.56390184", "0.5626983", "0.5626955", "0.555883", "0.5555184", "0.54956263", "0.54702634", "0.54599035", "0.54391104", "0.543066...
0.6391158
2
Initialize a two dimensional list as a matrix
def __init__(self, initArray):
    """Initialize the matrix from a two-dimensional list of ints.

    Raises:
        TypeError: if any element is not an int.
        ArithmeticError: if the rows have unequal lengths.
    """
    for row in initArray:
        for entry in row:
            # Exact type check (floats and bools-as-subclass are rejected alike
            # to the original `type(...) is not int` test).
            if type(entry) is not int:
                raise TypeError
    expected_width = len(initArray[0])
    if any(len(row) != expected_width for row in initArray):
        raise ArithmeticError
    self.array = initArray
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrixlist(inputlist, converter=proper, fake=False):\n if converter is None:\n converter = type(inputlist[0][0])\n xlen = len(inputlist[0])\n for x in xrange(1,len(inputlist)):\n if len(inputlist[x]) != xlen:\n raise IndexError(\"Unequal matrix row lengths for matrixlist of \"...
[ "0.732879", "0.705147", "0.69763094", "0.6929091", "0.69166654", "0.68848765", "0.68401814", "0.6766674", "0.66469675", "0.6645936", "0.66292936", "0.66119903", "0.6598583", "0.65325534", "0.6505658", "0.6502061", "0.6496865", "0.6486795", "0.6459021", "0.64460206", "0.643500...
0.0
-1
Return a string for the array of numbers for this matrix
def __str__(self):
    """Render the underlying 2-D list of numbers as a string."""
    return "{}".format(self.array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self) -> str:\n\t\treturn \",\".join(\"\".join(str(n) for n in m) for m in self.matrix)", "def __str__(self):\n output = \"\"\n for i in self.values:\n st = []\n output += \"[\"\n for j in i:\n st.append(str(j))\n output += \"...
[ "0.7381142", "0.7210961", "0.7170365", "0.7031644", "0.6882609", "0.68663853", "0.6862946", "0.6755842", "0.67423487", "0.66417027", "0.65829957", "0.6536184", "0.6523809", "0.6509771", "0.64562243", "0.6420926", "0.64129996", "0.638619", "0.6340786", "0.63212585", "0.626529"...
0.66033196
10
Add two matricies together
def __add__(self, otherMatrix):
    """Element-wise sum of two equally-shaped matrices.

    Raises:
        ArithmeticError: if the two matrices differ in shape.
    """
    rows = len(self.array)
    cols = len(self.array[0])
    if len(otherMatrix.array) != rows or len(otherMatrix.array[0]) != cols:
        raise ArithmeticError
    # BUG FIX: the result buffer was previously allocated as cols x rows but
    # indexed as rows x cols, which raised IndexError for any non-square matrix.
    summed = [
        [self.array[r][c] + otherMatrix.array[r][c] for c in range(cols)]
        for r in range(rows)
    ]
    return matrix(summed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __add__(self, other):\r\n if isinstance(other, mat4):\r\n return mat4(map(lambda x,y: x+y, self.mlist, other.mlist))\r\n else:\r\n raise TypeError, \"unsupported operand type for +\"", "def matAdd(a, b):\n shape=matShape(a)\n return [[matGet(a,x,y)+matGet(b,x,y) for ...
[ "0.81966865", "0.8028442", "0.7911417", "0.784823", "0.7748032", "0.77048737", "0.7664608", "0.7599502", "0.75470144", "0.75353587", "0.75039405", "0.74704564", "0.74508417", "0.73524606", "0.73200405", "0.7277604", "0.724819", "0.7234582", "0.71502364", "0.7145327", "0.70963...
0.6929311
26
Multiply two matricies together
def __mul__(self, otherMatrix):
    """Matrix product ``self @ otherMatrix``.

    Raises:
        ArithmeticError: if the inner dimensions disagree.
    """
    inner = len(self.array[0])
    if inner != len(otherMatrix.array):
        raise ArithmeticError
    rows = len(self.array)
    cols = len(otherMatrix.array[0])
    # BUG FIX: the result buffer was previously allocated as cols x rows but
    # indexed as rows x cols, breaking multiplication of non-square matrices.
    product = [[0] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            for k in range(inner):
                product[r][c] += self.array[r][k] * otherMatrix.array[k][c]
    return matrix(product)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matmul(a, b):\n raise NotImplementedError", "def matrix_mult(m1, m2):\n pass", "def matMul(a, b):\n sa=matShape(a)\n sb=matShape(b)\n if sa[1]!=sb[0]: raise ValueError\n ret=matZeros((sa[0],sb[1]))\n for i in range(sa[0]):\n for j in range(sb[1]):\n val=0.0\n for k ...
[ "0.8291302", "0.8262729", "0.80926496", "0.78970397", "0.78095114", "0.76959616", "0.76780117", "0.76680285", "0.7643243", "0.7567869", "0.753683", "0.7533653", "0.7526934", "0.752129", "0.74732196", "0.7456403", "0.7453868", "0.7389877", "0.7388949", "0.73717046", "0.7357616...
0.0
-1
Load instruments from configpath
def _load(self) -> list[Instrument]:
    """Load instruments (and the mode spec) from ``self.configpath``.

    Stores the parsed config on ``self._config`` and returns the list of
    configured instruments.
    """
    logger.info("Loading config...")
    self._config = yml.load(self.configpath)
    instruments, modespec = self._config["instruments"], self._config["modes"]
    logger.success(f"Found {len(instruments)} instruments, {len(modespec)} modes")
    # BUG FIX: the annotation promises list[Instrument] but the function
    # previously returned None; return the loaded instruments.
    return instruments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {...
[ "0.63088006", "0.62734544", "0.60392916", "0.59869903", "0.58425516", "0.5792564", "0.57058764", "0.56768847", "0.56603354", "0.5657189", "0.56550276", "0.56537586", "0.5621684", "0.5609595", "0.5587608", "0.5586826", "0.55499035", "0.5522914", "0.5517707", "0.5516953", "0.54...
0.7140976
0
Expose unique instrument classes found in config
def _expose(self) -> None:
    """Expose each unique instrument class found in the config to Pyro."""
    unique_classes = {type(inst) for inst in self._config["instruments"]}
    for cls in unique_classes:
        pyro.expose(cls)
    logger.success(f"Exposed {len(unique_classes)} instrument class(es): {unique_classes}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _config_classes(self):\n pass", "def config(self) -> InstrumentConfig:\n ...", "def register_instrument(instrument):\n config.INSTRUMENTS.append(instrument.id)\n for m in instrument.modules:\n register_module(m)", "def _instrument_class(self, cls):\n newcls = type('Instr...
[ "0.6167917", "0.6165286", "0.6108895", "0.5712228", "0.56472594", "0.5590002", "0.54058063", "0.5276742", "0.5238212", "0.517786", "0.5136025", "0.5066814", "0.5047145", "0.5043953", "0.5038126", "0.50306547", "0.502609", "0.5012202", "0.5012202", "0.50089884", "0.49899292", ...
0.66089696
0
Register instrument instances and self with daemon and storing uris
def _serve(self) -> None:
    """Register each instrument, then self, with the daemon; store the URIs."""
    for inst in self._config["instruments"]:
        inst_uri = self._daemon.register(inst, objectId=str(inst))
        self._services[inst.id] = str(inst_uri)
        logger.success(f"Registered {inst} at {inst_uri}")
    self.uri = self._daemon.register(self, objectId=self.servername)
    logger.success(f"Registered self at {self.uri}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_instrument(instrument):\n config.INSTRUMENTS.append(instrument.id)\n for m in instrument.modules:\n register_module(m)", "def register():\n signals.initialized.connect(initialize)\n signals.article_generator_context.connect(add_libravatar)", "def on_register(cls):", "def _regi...
[ "0.61770165", "0.586058", "0.5787275", "0.57755834", "0.5748999", "0.5597987", "0.5547358", "0.5466158", "0.5462284", "0.5456908", "0.5437423", "0.54095894", "0.5405339", "0.5405339", "0.53873736", "0.53591174", "0.5346121", "0.5344868", "0.5339623", "0.53348446", "0.53146374...
0.78891295
0
Save instruments and modes to configpath
def save(self) -> None:
    """Persist the current instruments and modes back to ``self.configpath``."""
    logger.info("Saving to config...")
    yml.save(self._config, self.configpath)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def save(self, config_path):\n raise NotImplementedError()", "def save():\n\t\ttry:\n\t\t\t#paths\n\t\t\tConfig.conf.set('paths', 'source_dir', Config.source_dir)\n\t\t\tConfig.conf.set('paths', 'lyrics_dir', Config.lyrics_di...
[ "0.69961077", "0.6905757", "0.67979085", "0.6779115", "0.663721", "0.6624185", "0.65824383", "0.6569083", "0.6564401", "0.6522336", "0.6489692", "0.646409", "0.6439854", "0.6433161", "0.637511", "0.63696384", "0.635844", "0.63290244", "0.6287945", "0.62523466", "0.62460214", ...
0.5880172
46
Disconnect instruments and shutdown daemon
def shutdown(self) -> None:
    """Disconnect every configured instrument, then shut the daemon down."""
    logger.info("Disconnecting instruments...")
    for inst in self._config["instruments"]:
        inst.disconnect()
    logger.info(f"Shutting down {self}...")
    self._daemon.shutdown()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutdown(self):\n if self.alive:\n libplasma.disconnect(self.conn)\n self.alive = False", "def shutdown(self):", "def stopAndDisconnectWalabot():\n wlbt.Stop()\n wlbt.Disconnect()\n print ('Termination successful')", "def shutdown(self) -> None:", "def shutdown(self) -> None:", "d...
[ "0.67708665", "0.6769665", "0.6741867", "0.66888916", "0.66888916", "0.6571312", "0.65665215", "0.6500033", "0.64636064", "0.64133114", "0.6387295", "0.6361316", "0.63584465", "0.6352385", "0.6346766", "0.6346196", "0.6339963", "0.63314235", "0.63314235", "0.63314235", "0.631...
0.7881212
0
python ~/code/xdoctest/testing/test_linenos.py test_lineno_failcase_called_code python ~/code/xdoctest/testing/test_linenos.py
def test_lineno_failcase_called_code():
    # Regression test: when a doctest fails inside code *called by* the doctest
    # (not in the doctest lines themselves), the runner should still report both
    # the relative and absolute line numbers of the failing doctest statement.
    # NOTE(review): the codeblock literal below was flattened onto one line by an
    # earlier tooling pass; it is kept byte-identical here.
    text = _run_case(utils.codeblock( r''' def func(a): """ Example: >>> func(0) >>> # this doesnt do anything >>> print('this passes') this passes >>> # call the failing code >>> func(3) """ if a > 0: nested_failure(a) return a def nested_failure(a): if a > 0: nested_failure(a - 1) else: raise Exception('fail case') '''))
    assert 'rel: 6, abs: 9,' in text
    assert text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_error(doctest):", "def test_expt(doctest):", "def test_exp(doctest):", "def testit(did_pass):\n\n # This function works correctly--it is verbatim from the text, chapter 6\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Te...
[ "0.704092", "0.70044655", "0.6639948", "0.6638953", "0.65686834", "0.65359443", "0.6508109", "0.6495619", "0.643054", "0.6405408", "0.63980496", "0.63929", "0.6392692", "0.63884795", "0.63859123", "0.63859123", "0.6350053", "0.6331821", "0.6329094", "0.6329094", "0.63262904",...
0.7327849
0
Add to the list of describing adjectives.
def add_adjectives(self, adjective):
    """Append one describing adjective to this word's adjective list."""
    # append() mutates the same list object that `+= [adjective]` did.
    self.adjectives.append(adjective)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_adjectives(self, *sAdjs):\n self.adjectives += list(sAdjs)", "def add(self):\n pass", "def add_many_descriptors(self, descriptors):", "def add(self, PlugLead):\n\n self.check_conflicts(PlugLead)\n self.plugleads.append(PlugLead)", "def add_disease(self, disease):\n ...
[ "0.7263275", "0.62598264", "0.5947216", "0.57094675", "0.5695133", "0.5606961", "0.56066847", "0.5586834", "0.5575344", "0.5548141", "0.5520845", "0.5520845", "0.55087703", "0.5452977", "0.5440875", "0.53886217", "0.5388016", "0.5352896", "0.5326072", "0.5325428", "0.5324635"...
0.7504388
0
Returns the list of describing adjectives. The list is shuffled first because generally this is used to get a random adjective.
def get_adjectives(self):
    """Return the list of describing adjectives.

    The list is shuffled in place first because callers generally use this to
    pick a random adjective.
    """
    random.shuffle(self.adjectives)
    return self.adjectives
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_adjectives(lyrics):\n doc = nlp(lyrics.lower())\n all_adjectives = [token.lemma_ for token in doc if token.pos_ == \"ADJ\"]\n return all_adjectives", "def succ(self):\n return [ self.simple_reflection(i) for i in self.descents(positive=True) ]", "def getAdjectives(self, word):\n\t\t...
[ "0.605831", "0.5822134", "0.57419574", "0.573242", "0.55850464", "0.5502515", "0.5492401", "0.5489824", "0.5483387", "0.5447286", "0.54260534", "0.5316058", "0.52798134", "0.5279397", "0.5267892", "0.5265623", "0.525828", "0.52477735", "0.5244685", "0.5200043", "0.51937425", ...
0.81158966
0
Returns the noun, including all its describing adjectives, as a string.
def full_string(self):
    """Return ``"<noun>: <adj> <adj> ..."`` for this word and its adjectives."""
    described_by = " ".join(str(adj) for adj in self.adjectives)
    return "{}: {}".format(str(self.word), described_by)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def en_noun(t, label):\r\n s = label\r\n p = []\r\n is_uncountable = False\r\n \r\n # http://en.wiktionary.org/wiki/Template:en-noun\r\n head = t.arg(\"head\", label)\r\n p1 = t.arg(0)\r\n p2 = t.arg(1)\r\n \r\n if p1 == \"-\":\r\n # uncountable\r\n is_uncountable = True...
[ "0.6249946", "0.6021164", "0.600624", "0.5979195", "0.59327227", "0.58711636", "0.57597136", "0.5741161", "0.57387596", "0.57079136", "0.56950766", "0.5683717", "0.56733876", "0.5652428", "0.5563646", "0.55163616", "0.5513116", "0.5472722", "0.5459045", "0.5425532", "0.542323...
0.69946307
0
Parse a noun object from a data file containing nouns and their describing adjectives.
def parse(text):
    """Parse a line of the form ``word count adj count adj count ...``.

    Builds a ``Noun`` from the first (word, count) pair and attaches one
    ``Word`` adjective per remaining pair. Returns the Noun.
    """
    tokens = text.split(' ')
    noun = Noun(tokens[0], int(tokens[1]))
    for i in range(2, len(tokens), 2):
        noun.add_adjectives(Word(tokens[i], int(tokens[i + 1])))
    return noun
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __read_data__(self):\n with open(self.file, 'r') as data:\n sentence = []\n tags = []\n for line in data:\n terms = line.rstrip().split(WHITESPACE)\n for term in terms:\n word_tag = tuple(term.split(TAGCHAR))\n ...
[ "0.5926569", "0.56615496", "0.5575726", "0.5492108", "0.54724497", "0.54638004", "0.5449956", "0.5432941", "0.5421955", "0.53711075", "0.53692234", "0.535761", "0.53158194", "0.5268704", "0.52373093", "0.5237189", "0.5234773", "0.5220252", "0.52158135", "0.5205877", "0.520256...
0.69166636
0
Returns the self.guessed_by and self.metaphors_used data as a readable string.
def get_str_metadata(self):
    """Return guessed_by and metaphors_used as a readable two-line string."""
    summary_lines = [
        "Guessed by {}".format(self.guessed_by),
        "{} metaphors used".format(self.metaphors_used),
    ]
    return "\n".join(summary_lines)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_strings(self):\n return self._guessed_strings", "def __str__(self):\n d = {}\n d[\"tuner_number\"] = self.tuner_number\n d[\"output_format\"] = self.output_format\n d[\"output_source\"] = self.output_source\n return str(d)"...
[ "0.66056585", "0.6369337", "0.63330597", "0.63225114", "0.62778735", "0.62415534", "0.6170479", "0.6140569", "0.60968494", "0.60799277", "0.60438675", "0.60398", "0.60284495", "0.6009198", "0.59779775", "0.59726894", "0.597072", "0.59602815", "0.594238", "0.5917474", "0.58926...
0.80824745
0
Takes `x_axis` and returns a uniformly sampled array of values from its minimum to its maximum, with a few extra points of padding on each side
def _extended_discrete_xaxis(x_axis, n_points=100, eps=0.10):
    """Uniformly sample ``n_points`` values spanning ``x_axis``'s range.

    The range is widened by ``eps`` times its length on each side so plotted
    curves extend slightly past the data.
    """
    lo, hi = np.min(x_axis), np.max(x_axis)
    span = hi - lo
    return np.linspace(lo - eps * span, hi + eps * span, num=n_points)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample(self):\n # For each row: round(random .* (max - min) + min, 0)\n random_array = prng.np_random.rand(self.num_discrete_space)\n return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]", "def scale(x_range=1, y_range=1):\r\n x = rand_v...
[ "0.6698139", "0.6558226", "0.65457267", "0.64248043", "0.6340408", "0.6254296", "0.61594844", "0.6148418", "0.60947704", "0.60901093", "0.5998522", "0.5891753", "0.5868367", "0.58548534", "0.5790514", "0.5744083", "0.5735349", "0.57298034", "0.57228523", "0.5720773", "0.56946...
0.59077674
11
Quick plot of a `tick.base.TimeFunction`
def plot_timefunction(time_function, labels=None, n_points=300, show=True, ax=None): if ax is None: fig, ax = plt.subplots(1, 1, figsize=(4, 4)) else: show = False if time_function.is_constant: if labels is None: labels = ['value = %.3g' % time_function.border_value] t_values = np.arange(10).astype('float') ax.plot(t_values, time_function.value(t_values), label=labels[0]) else: if labels is None: interpolation_to_legend = { TimeFunction.InterLinear: 'Linear', TimeFunction.InterConstLeft: 'Constant on left', TimeFunction.InterConstRight: 'Constant on right' } border_to_legend = { TimeFunction.Border0: 'border zero', TimeFunction.BorderConstant: 'border constant at %.3g' % time_function.border_value, TimeFunction.BorderContinue: 'border continue', TimeFunction.Cyclic: 'cyclic' } labels = [ 'original points', '%s and %s' % (interpolation_to_legend[time_function.inter_mode], border_to_legend[time_function.border_type]) ] original_t = time_function.original_t if time_function.border_type == TimeFunction.Cyclic: cycle_length = original_t[-1] original_t = np.hstack((original_t, original_t + cycle_length, original_t + 2 * cycle_length)) t_values = _extended_discrete_xaxis(original_t, n_points=n_points) ax.plot(time_function.original_t, time_function.original_y, ls='', marker='o', label=labels[0]) ax.plot(t_values, time_function.value(t_values), label=labels[1]) ax.legend() if show is True: plt.show() return ax.figure
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(...
[ "0.6426607", "0.62802076", "0.62569886", "0.62506014", "0.6236783", "0.6092056", "0.6068509", "0.60091037", "0.60037977", "0.60024494", "0.6001193", "0.59628236", "0.59590256", "0.5951325", "0.59470856", "0.59453624", "0.5932996", "0.5908867", "0.5891108", "0.5865043", "0.586...
0.68075925
0
Generates mapping from water measurements column names to indices of the given header.
def get_water_index_map(archive, header): column_re = { 'surface': { 'flow': 'pretok', 'level': 'vodostaj' }, 'ground': { 'altitude': 'nivo', 'level': 'vodostaj' } } column_map = {key: -1 for key in column_re[archive].keys()} empty = True # Do regex search of every db column for every CSV file column heading. for i, column in enumerate(header): for column_name in column_re[archive].keys(): if re.search(column_re[archive][column_name], column, re.IGNORECASE): if column_map[column_name] != -1: continue column_map[column_name] = i empty = False return None if empty else column_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping", "def colu...
[ "0.60388774", "0.5985021", "0.5983487", "0.5812272", "0.573689", "0.57180965", "0.5631869", "0.55975854", "0.55681896", "0.5565681", "0.554784", "0.5505632", "0.5441756", "0.5394642", "0.53900725", "0.53899986", "0.5387639", "0.537637", "0.5363915", "0.53568316", "0.53414994"...
0.6957255
0
Generates mapping from water measurements column names to values of the given CSV row.
def get_water_value_map(row, column_names_map): column_values_map = column_names_map.copy() row_length = len(row) empty = True for key, index in column_names_map.items(): # Check if non-empty value exist for given index. if -1 < index < row_length: value = row[index].strip() if value: column_values_map[key] = value empty = False continue # Else NULL is inserted in db. column_values_map[key] = 'NULL' return None if empty else column_values_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping", "def crea...
[ "0.62421864", "0.57918304", "0.5663072", "0.5657489", "0.56149256", "0.5612935", "0.5567542", "0.5544542", "0.5500382", "0.5480754", "0.545259", "0.544672", "0.5385878", "0.53811884", "0.5380167", "0.5332743", "0.53060913", "0.5292364", "0.527309", "0.52203315", "0.5217865", ...
0.62722737
0
Populate water measurements table for selected `archive`, `directory` and `stations`.
def populate_water_measurements(cursor, archive, directory, station): csv_path = get_data_path( 'water', 'raw', archive, directory, f'{station}.csv' ) with open(csv_path, 'r', encoding='utf-8') as file: reader = csv.reader(file, delimiter=';') header = next(reader) column_names_map = get_water_index_map(archive, header) if not column_names_map: return False water_body = get_water_definitions(archive)['body'] for row in reader: column_values_map = get_water_value_map(row, column_names_map) if column_values_map: date = datetime.strptime(row[0], '%d.%m.%Y').date() data_columns = ', '.join(column_values_map.keys()) data_values = ', '.join(column_values_map.values()) cursor.execute(f'''INSERT INTO {water_body}_measurements (station_id, date, {data_columns}) VALUES ({station}, '{str(date)}', {data_values})''') return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')...
[ "0.69976664", "0.58139586", "0.5748849", "0.5552527", "0.5550863", "0.5530793", "0.55279726", "0.5518004", "0.5421447", "0.54047465", "0.5358192", "0.53122264", "0.52683693", "0.5263279", "0.52280056", "0.5153076", "0.51038533", "0.51007193", "0.507282", "0.50530833", "0.5048...
0.7368926
0
Populate watercourse and aquifer related data tables.
def populate_water_tables(connection): metadata = load_metadata('water') cursor = connection.cursor() # Check if tables are already populated. cursor.execute('SELECT count(*) FROM watercourses') watercourse_count = cursor.fetchone()[0] cursor.execute('SELECT count(*) FROM aquifers') aquifer_count = cursor.fetchone()[0] if watercourse_count and aquifer_count: print('Water tables already populated!') return station_data = get_station_data() for archive in metadata.keys(): print(f'{archive}-water:'.upper()) water_body = get_water_definitions(archive)['body'] # 1. Populate watercourses/aquifers: stations = {} for water_body_name in metadata[archive].keys(): print(f'\tPopulating {water_body}: "{water_body_name}"') cursor.execute(f'''INSERT INTO {water_body}s(location_id, name) VALUES (0, '{water_body_name}')''') water_body_id = cursor.lastrowid # 2. Populate watercourse_stations/aquifer_stations: for station_id in metadata[archive][water_body_name]['stations']: station_name = clean_name(metadata[archive][water_body_name]['stations'][station_id]['name']) if station_id in stations: # Prefer watercourses/aquifer with more stations current_len = len(metadata[archive][water_body_name]['stations']) previous_len = len(metadata[archive][stations[station_id]]['stations']) if current_len < previous_len: print(f'\t\tStation already exists: {station_id} - "{station_name}" ("{water_body_name}")') continue else: cursor.execute(f'''DELETE FROM {water_body}_stations WHERE id = {station_id}''') print(f'\t\tRemoved station: {station_id} - "{station_name}" from "{stations[station_id]}")') stations[station_id] = water_body_name print(f'\t\tPopulating station: {station_id} - "{station_name}"') # Insert station location if station data exists. 
location_id = 0 station_row = station_data.query(f'ŠIFRA == "{station_id}"') if not station_row.empty: index = station_row.index[0] lat = station_row.at[index, 'LAT'] lng = station_row.at[index, 'LON'] if not np.isnan(lat) and not np.isnan(lng): name = f"{station_row.at[index, 'VODOMERNA POSTAJA']} ({station_row.at[index, 'VODOTOK']})" cursor.execute(f'''INSERT INTO locations(name, lat, lng) VALUES ('{name}', {lat}, {lng})''') location_id = cursor.lastrowid # Insert station. cursor.execute(f'''INSERT INTO {water_body}_stations(id, {water_body}_id, location_id, name) VALUES ({station_id}, {water_body_id}, {location_id}, '{station_name}')''') # 3. Populate watercourse_measurements/aquifer_measurements: if not populate_water_measurements(cursor, archive, metadata[archive][water_body_name]['dir'], station_id): cursor.execute(f'''DELETE FROM {water_body}_stations WHERE id = {station_id}''') print(f'\t\tRemoved station with useless data: {station_id} - "{station_name}"') # Remove empty watercourses/aquifers. cursor.execute(f'''SELECT w.id, w.name FROM {water_body}s w WHERE NOT EXISTS ( SELECT s.id FROM {water_body}_stations s WHERE w.id = s.{water_body}_id )''') for row in cursor.fetchall(): cursor.execute(f'''DELETE FROM {water_body}s WHERE id = {row[0]}''') print(f'\tRemoved empty {water_body}: "{row[1]}"')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)", "def populate_tables(connection: sqlite3.Connection) -> None:\n fake = Faker()\n Faker.seed(0)\n\n c = conn.cursor()\n\n number_of_courses = fake.pyint(min_valu...
[ "0.69008756", "0.66442764", "0.6618634", "0.65720314", "0.6367745", "0.63610566", "0.61599475", "0.61476094", "0.60915166", "0.60889375", "0.60289264", "0.59944206", "0.59331673", "0.5864972", "0.582192", "0.5799883", "0.5796334", "0.5792097", "0.5777224", "0.5760808", "0.574...
0.71547
0
Populate locations data table.
def populate_locations(connection): print('Populating locations...') cursor = connection.cursor() with open(get_data_path('locations', 'locations.json'), 'r', encoding='utf-8') as json_file: locations = json.load(json_file) for station_id, location in locations.items(): cursor.execute(f'''SELECT id FROM watercourse_stations WHERE id = {station_id}''') if len(cursor.fetchall()): cursor.execute(f'''INSERT INTO locations(name, lat, lng) VALUES ('{location['name']}', {location['lat']}, {location['lng']})''') cursor.execute(f'''UPDATE watercourse_stations SET location_id = {cursor.lastrowid} WHERE id = {station_id}''')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_position_data(self):\n # populate 'Location' field randomly\n self.output['Location'] = np.random.choice(self.locations, self.obs)\n\n # clean up geodata data frame and create 'Position' attribute\n nc = self.geodata[['Lat', 'Lng', 'Elevation']].round(2)\n nc['Elevat...
[ "0.6048028", "0.6013519", "0.59421986", "0.5901815", "0.58811194", "0.5729807", "0.5710549", "0.56738675", "0.56535035", "0.554302", "0.5542974", "0.55208075", "0.5469795", "0.54697716", "0.5454492", "0.54353935", "0.5430382", "0.542676", "0.5424422", "0.5422587", "0.54031056...
0.6565873
0
Check if given forecast dictionary contains a numeric value with provided key.
def is_forecast_number(key, forecast): return key in forecast and type(forecast[key]) in [float, int]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moreThanOne(dict, key):\n\treturn key in dict and dict[key] > 0", "def _has_numeric_strict(self) -> bool:\n return bool({'i', 'f'} & self._data.keys())", "def contains_200(dictnr):\n contains = False\n for i in dictnr:\n if dictnr[i] == 200:\n contains = True\n print(conta...
[ "0.6130827", "0.6060219", "0.5976841", "0.5809231", "0.5789039", "0.5752602", "0.56971747", "0.5649147", "0.56462014", "0.5644481", "0.56440645", "0.5638638", "0.56340736", "0.560666", "0.5580076", "0.55792755", "0.55783355", "0.55679023", "0.55529094", "0.5530956", "0.552016...
0.842359
0
Populate weather data tables.
def populate_weather(connection): metadata = load_metadata('weather') cursor = connection.cursor() water_defs = get_water_definitions() # Check if tables are already populated. cursor.execute('SELECT count(*) FROM weather') weather_count = cursor.fetchone()[0] if weather_count: print('Weather tables already populated!') return print('WEATHER:') # Darksky data for dir_name, location in metadata.items(): print(f'\tPopulating weather: "{location["name"]}".') # Insert location. cursor.execute(f'''INSERT INTO locations(name, lat, lng) VALUES ('{location['name']}', {location['lat']}, {location['lng']})''') location_id = cursor.lastrowid # Set weather locations for watercourses/aquifers. for water_body in [d['body'] for d in water_defs.values()]: if water_body in location: cursor.execute(f'''UPDATE {water_body}s SET location_id = {location_id} WHERE name IN ('{"','".join(location[water_body])}')''') break dir_path = get_data_path('weather', 'raw', dir_name) for json_file_name in os.listdir(dir_path): json_path = os.path.join(dir_path, json_file_name) with open(json_path, 'r', encoding='utf-8') as json_file: print(f'\t\tPopulating year: {json_file_name[0:-5]}') year_forecasts = json.load(json_file) for date, date_forecast in year_forecasts.items(): hourly_forecasts = date_forecast['hourly'] if not hourly_forecasts: print(f'\t\tNo hourly forecasts for {date}!') continue daily_forecast = { 'location_id': location_id, 'time': date_forecast['time'], 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'], 'precipitation': 0, 'snow_accumulation': 0 } # List of value names with `avg`, `min` and `max` values value_names = { 'temperature': 'temperature', 'cloud_cover': 'cloudCover', 'dew_point': 'dewPoint', 'humidity': 'humidity', 'pressure': 'pressure', 'uv_index': 'uvIndex', 'precipitation_probability': 'precipProbability', 'precipitation_intensity': 'precipIntensity' } # Value name counters, which indicate how many times (out of 24) # certain value appears in 
hourly data. value_counts = {k: 0 for k in value_names.keys()} for value_name in value_names.keys(): daily_forecast[f'{value_name}_avg'] = 0.0 daily_forecast[f'{value_name}_min'] = float('inf') daily_forecast[f'{value_name}_max'] = float('-inf') # Calculate daily forecast values from hourly forecasts. for hourly_forecast in hourly_forecasts: for value_name in value_names.keys(): orig_value_name = value_names[value_name] if is_forecast_number(orig_value_name, hourly_forecast): daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name] daily_forecast[f'{value_name}_min'] = min( hourly_forecast[orig_value_name], daily_forecast[f'{value_name}_min'] ) daily_forecast[f'{value_name}_max'] = max( hourly_forecast[orig_value_name], daily_forecast[f'{value_name}_max'] ) value_counts[value_name] += 1 if is_forecast_number('precipAccumulation', hourly_forecast) \ and hourly_forecast['precipType'] == 'snow': daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation'] elif is_forecast_number('precipIntensity', hourly_forecast) \ and is_forecast_number('precipProbability', hourly_forecast): daily_forecast['precipitation'] += \ hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability'] for value_name, value_count in value_counts.items(): if value_count: # Calculate average. daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count else: # If value never appeared daily_forecast[f'{value_name}_avg'] = 'NULL' daily_forecast[f'{value_name}_min'] = 'NULL' daily_forecast[f'{value_name}_max'] = 'NULL' cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())}) VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''') # IOT data: for location in SETTINGS['weather_locations_iot']: print(f'\tPopulating weather: "{location["name"]}".') # Insert location. 
cursor.execute(f'''INSERT INTO locations(name, lat, lng) VALUES ('{location['name']}', {location['lat']}, {location['lng']})''') location_id = cursor.lastrowid # Set weather locations for watercourses/aquifers. for water_body in [d['body'] for d in water_defs.values()]: if water_body in location: cursor.execute(f'''UPDATE {water_body}s SET location_id = {location_id} WHERE name IN ('{"', '".join(location[water_body])}')''') # Set locations for all stations on given water body to match its location. cursor.execute(f'''SELECT id FROM {water_body}s WHERE location_id = {location_id}''') ids = [row[0] for row in cursor.fetchall()] if len(ids): cursor.execute(f'''UPDATE {water_body}_stations SET location_id = {location_id} WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''') break file_name = f'''{location['lat']}-{location['lng']}.json''' json_path = get_data_path('weather', 'raw', file_name) # If data file doesn't exist, download it first. if not os.path.isfile(json_path): with open(json_path, 'wb', encoding="utf-8") as file: file.write(read_from_url(location['url'], decode=False)) with open(json_path, 'r', encoding='utf-8') as json_file: row_names = { "Sun_duration": "sun_duration", "CloudCover": "cloud_cover_avg", "Percipitation": "precipitation", "New_snow_blanket": "snow_accumulation", "Snow_blanket": "snow_depth", "TemperatureAvg": "temperature_avg", "TemperatureMin": "temperature_min", "TemperatureMax": "temperature_max" } forecasts = json.load(json_file) for forecast in forecasts: f = {row_names[k]: forecast[k] for k in row_names.keys()} f['location_id'] = location_id f['time'] = round(forecast['LastUpdatedEpoch'] / 1000) cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())}) VALUES ({', '.join([str(v) for v in f.values()])})''')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')...
[ "0.6654514", "0.66226166", "0.6348557", "0.632052", "0.62991303", "0.62082505", "0.61876976", "0.6142009", "0.60627335", "0.6002963", "0.5983051", "0.59750384", "0.5964339", "0.5926051", "0.59136045", "0.59097177", "0.58774203", "0.5861508", "0.5857061", "0.5843847", "0.58242...
0.78376013
0
Creates and populates water and weather database. Returns None
def create_databases(): db_connection = connect_to_db() # Create database tables. create_tables(db_connection) # Populate water tables. populate_water_tables(db_connection) # station_data = get_station_data() # station = station_data.query('ŠIFRA == 30301') # print(station) # index = station.index[0] # lat = station.at[index, 'LAT'] # lng = station.at[index, 'LON'] # name = f"{station.at[index, 'VODOMERNA POSTAJA']} ({station.at[index, 'VODOTOK']})" # print(index, lat, lng, name) # Populate location tables # populate_locations(db_connection) # Populate weather tables populate_weather(db_connection) db_connection.commit() db_connection.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_weather(connection):\n metadata = load_metadata('weather')\n cursor = connection.cursor()\n water_defs = get_water_definitions()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM weather')\n weather_count = cursor.fetchone()[0]\n\n if weather_coun...
[ "0.753115", "0.6986817", "0.6771315", "0.67573315", "0.6443865", "0.63336414", "0.63230395", "0.6307932", "0.6303793", "0.62672585", "0.6251016", "0.6219543", "0.62010366", "0.61712205", "0.6160077", "0.6145157", "0.60383976", "0.6035518", "0.6035307", "0.60166687", "0.601191...
0.67318547
4
Provide string of addresses and this class will extract "street" & "house number".
def __init__(self, string): self.string = string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_address(self, soup):\n street, city, state, zipcode = None, None, None, None\n try:\n # property detail tag\n street = soup.find('div', class_='main-address').get_text().strip()\n # find address tag\n address = soup.find('div', class_='c-address')\...
[ "0.7285767", "0.7073066", "0.68373525", "0.6803627", "0.6702961", "0.66992515", "0.6645722", "0.6645722", "0.66008115", "0.6555697", "0.65275055", "0.64732426", "0.643964", "0.64220065", "0.6419511", "0.63993114", "0.63021076", "0.62145597", "0.6205807", "0.62011707", "0.6192...
0.0
-1
Given the input string, tries to separate them into "street" & "house number" variables
def filter_string(self): logger.info("Information Gathering Finished!") pattern_street = re.compile(r'[A-Za-z]+\s?\w+(?=\s[Nn]o\s\d+$) |' r' [A-Za-z]+\s?\w+\s?[A-Za-z]+\s?[A-Za-z]+', re.X) # street pattern match_street = pattern_street.search(self.string) # If there are no house numbers provided in the input file, # print(not found) in the output JSON file numbers_instring = re.findall(r'\d+', self.string) # digit counts in given string if len(numbers_instring) > 0: # In most cases we have: "no" followed by some digits pattern_housenumber = re.compile(r'(\d+\s?[A-Za-z]?$) |' r' (^\d+) |' r' [Nn]o+[\s?]+[0-9]+$', re.X) # house number pattern match_housenumber = pattern_housenumber.search(self.string) fin_housenumber = match_housenumber[0] else: match_housenumber = ["not found"] fin_housenumber = match_housenumber[0] fin_street = match_street[0] print("street: ", fin_street) print("housenumber: ", fin_housenumber) return {'street': fin_street, 'housenumber': fin_housenumber}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_da...
[ "0.70592016", "0.66491073", "0.61709803", "0.6159874", "0.614569", "0.6053972", "0.6050844", "0.6037463", "0.5887759", "0.58091587", "0.5799028", "0.57310367", "0.56918335", "0.56567967", "0.5650815", "0.5538323", "0.5499395", "0.54938877", "0.5493846", "0.54662937", "0.54369...
0.55393785
15
Helper function to construct multidimensional dictionaries e.g myhash = _makehash() myhash[1][2] = 4 myhash[2][5][8] = 17
def _makehash(): return defaultdict(_makehash)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hashMap(self,arr):\r\n n = len(arr)\r\n dict1 = {}\r\n i = 1\r\n for i in range(n): \r\n if(i > 0): \r\n key=arr[i]\r\n value=arr[0]\r\n dict1[key] = value\r\n return dict1", "def __init__(self):\n ...
[ "0.67796296", "0.5938041", "0.5885213", "0.5752976", "0.5734703", "0.5709291", "0.5704826", "0.5652041", "0.5644233", "0.56247675", "0.56185186", "0.5611323", "0.56072676", "0.5539448", "0.5488976", "0.5445115", "0.54388916", "0.54211164", "0.54074925", "0.53994673", "0.53773...
0.6773854
1
Convert headers of fetched tickers to same format for convenient data storage in Database. This method assumes that parser's headers are configured properly(headers_dict), if one of the headers is missing in config file exception raised
def convert_headers(self, tickers): result = _makehash() for pair_name, fetched_values_dict in list(tickers.items()): for header, value in list(fetched_values_dict.items()): result[pair_name][self.config['headers'][header]] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_headers(headers):\n\n headers_new = []\n # reformat column headers if needed\n for j, hd in enumerate(headers):\n # rename so always have T1/2 (s)\n if hd == \"T1/2 (num)\" or hd == \"T1/2 (seconds)\":\n hd = \"T1/2 (s)\"\n # for uncertainties, add previous colum...
[ "0.6760923", "0.6707738", "0.66021603", "0.6435935", "0.62480223", "0.62002486", "0.6085462", "0.60607", "0.605833", "0.6038865", "0.5981734", "0.5961042", "0.5954765", "0.59422773", "0.5939061", "0.5936525", "0.5867014", "0.5862857", "0.5862484", "0.586054", "0.5848212", "...
0.7734185
0
Calculate the similarity based on Cosine Similarity between two CTRDMs
def cosinesimilarity_cal(CTRDM1, CTRDM2): # get number of conditions n_cons = np.shape(CTRDM1)[0] # calculate the number of value above the diagonal in RDM n = n_cons * (n_cons - 1) # initialize two vectors to store the values above the diagnal of two RDMs v1 = np.zeros([n], dtype=np.float64) v2 = np.zeros([n], dtype=np.float64) # assignment nn = 0 for i in range(n_cons): for j in range(n_cons): if i != j: v1[nn] = CTRDM1[i, j] v2[nn] = CTRDM2[i, j] nn = nn + 1 # calculate the Cosine Similarity V1 = np.mat(v1) V2 = np.mat(v2) num = float(V1 * V2.T) denom = np.linalg.norm(V1) * np.linalg.norm(V2) cos = num / denom similarity = 0.5 + 0.5 * cos return similarity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cosine_similarity(v1, v2):\n # Cosine Sim:\n # Get the words that both have in common\n\n v1words = set(v1.keys())\n v2words = set(v2.keys())\n\n numerator_words = v1words.intersection(v2words)\n\n # Multiply and sum those counts\n numerator = 0.0\n for word in numerator_words:\n ...
[ "0.7754487", "0.7676584", "0.7633492", "0.76006675", "0.7589376", "0.7576636", "0.75592244", "0.75373095", "0.7522885", "0.746666", "0.74491453", "0.7418762", "0.7398554", "0.73664916", "0.7334395", "0.7312988", "0.7309643", "0.7292841", "0.72754246", "0.7240708", "0.72226435...
0.80601525
0
Adds basic_vector to the basic vectors. If there are at least 3 arrays in _basic_vectors, then add a new array to _featureVector. This added array is composed of the basic vectors and its 2 first central derivatives basic_vector must be the array returned by the mfcc.
def build_feature_vector(self, basic_vector): basic_vector = basic_vector - np.mean(basic_vector) self._basic_vectors.append(basic_vector) if len(self._basic_vectors) > 2: #if there are at least 3 basic vectors we can calculate the central derivative for the vector before this one first_derivative = (basic_vector - self._basic_vectors[-3])/(2*self.seconds_to_next_vector) second_derivative = (basic_vector - 2*self._basic_vectors[-2] + self._basic_vectors[-3])/(self.seconds_to_next_vector**2) feature_vector = np.concatenate((basic_vector, first_derivative, second_derivative)) self._feature_vectors.append(feature_vector)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_vector(self, doc_name, add_cnt, new_docvec):\n \n # determine the weight of the merging pieces\n old_weight = float(self.vector_cnt) / (self.vector_cnt + add_cnt)\n new_weight = float(add_cnt) / (self.vector_cnt + add_cnt)\n \n if len(self.name) == 0:\n self.nam...
[ "0.5917165", "0.5735873", "0.57002974", "0.5500693", "0.54043525", "0.5337707", "0.5288758", "0.5286068", "0.5255515", "0.5167239", "0.51532346", "0.5146452", "0.5016166", "0.5007243", "0.49999866", "0.4954787", "0.49387354", "0.49348387", "0.4906069", "0.48973984", "0.489477...
0.8391323
0
If there is at least an feature vector then returns it, else returns None
def get_last_feature_vectors(self): if len(self._feature_vectors): return self._feature_vectors[-1] return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out...
[ "0.6076066", "0.6039801", "0.60045683", "0.5997051", "0.59677297", "0.5927279", "0.5924938", "0.592038", "0.58969414", "0.58928514", "0.5883369", "0.5868851", "0.5810611", "0.57908976", "0.57573485", "0.5748689", "0.57457215", "0.5690704", "0.5680142", "0.5675147", "0.5661629...
0.6274125
0