query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Compute one step of the map for a score (of the given axis and with given neighbours) as a function of the opposite score
def _one_step(self, gamma, axis, opp_scores): opp_exp = opp_scores**gamma s = _np.array([]) for i in range(self.d[axis]): s = _np.append(s, _np.take(opp_exp, self._neighb[axis][i]).sum()) return s/_np.mean(s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _score_to_distance_map(y_grid, x_grid, heatmap, points_y, points_x,\n score_distance_offset):\n y_diff = y_grid[:, :, tf.newaxis] - points_y\n x_diff = x_grid[:, :, tf.newaxis] - points_x\n distance = tf.math.sqrt(y_diff**2 + x_diff**2)\n return tf.math.divide(heatmap, distance + ...
[ "0.58188856", "0.5598756", "0.5585042", "0.55513406", "0.5534682", "0.5500189", "0.5466905", "0.541867", "0.54055065", "0.5333235", "0.53182185", "0.5281047", "0.5278096", "0.52513933", "0.5239409", "0.5227734", "0.5214485", "0.5214058", "0.5212882", "0.520227", "0.5186105", ...
0.50255334
43
Given the scores s, the list of nodes that already reached the low boundary and the ranking computed so far, check if new nodes have reached the low boundary, and, if so, updated the ranking with those
def _update_zero_rank(self, s, zero_ind, rank): lb = self.params['low_bound'] new_zeros_ind = _np.setdiff1d(_np.nonzero(s <= lb), zero_ind) if len(new_zeros_ind) > 0: sorted_zeros_ind = new_zeros_ind[_np.argsort(s[new_zeros_ind])] rank = _np.append(rank, sorted_zeros_ind) zero_ind = _np.append(zero_ind, new_zeros_ind) return rank, zero_ind
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rerank_candidates(s, pred2sub_rank, all_predictions, rerank_top=20):\n predicted_smiles = []\n model_input = []\n for (predict_smi, label), _ in Counter(all_predictions).most_common(rerank_top):\n if predict_smi == s:\n continue\n features = get_all_features(\n get_...
[ "0.5954974", "0.59247744", "0.5833818", "0.56625795", "0.56450963", "0.5566416", "0.55646116", "0.5558839", "0.5552098", "0.5540993", "0.55369794", "0.5497016", "0.546615", "0.54586864", "0.5453142", "0.54514194", "0.54451036", "0.5403085", "0.53968745", "0.53879005", "0.5386...
0.6555181
0
Update the class variables after the algorithm execution
def _update_vars(self, axis, traj_s, traj_o, rank_s, rank_o, t): if axis == 0: self.x_traj = traj_s self.x_ranking = rank_s self.x_scores = traj_s[-1] self.inverse_y_traj = traj_o self.inverse_y_ranking = rank_o self.inverse_y_scores = traj_o[-1] if axis == 1: self.y_traj = traj_s self.y_ranking = rank_s self.y_scores = traj_s[-1] self.inverse_x_traj = traj_o self.inverse_x_ranking = rank_o self.inverse_x_scores = traj_o[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n ...
[ "0.6898461", "0.6669482", "0.6625015", "0.6544939", "0.6408313", "0.6408313", "0.6408313", "0.6405048", "0.6306032", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "...
0.0
-1
Get two lists (for rows and columns) where the row/col index gives the list of col/row indexes of the nonzero elements.
def _get_index_lists(self, mat): n_row, n_col = mat.shape col_ind_at_row, row_ind_at_col = [],[] for i in range(n_row): aux_ind = _np.where(mat[i]>0)[0] if len(aux_ind) == 0: raise Exception('Row {} is composed of zeros'.format(i)) col_ind_at_row.append(aux_ind) for j in range(n_col): aux_ind = _np.where(mat[:,j]>0)[0] if len(aux_ind) == 0: raise Exception('Column {} is composed of zeros'.format(j)) row_ind_at_col.append(aux_ind) return col_ind_at_row, row_ind_at_col
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_empty_cells(state):\n cells = []\n for row_index, row in enumerate(state.board):\n for col_index, cell in enumerate(row):\n if cell == 0:\n cells.append([row_index, col_index])\n return cells", "def empty_cells(state):\n cells = []\n\n for i, row in enumera...
[ "0.70034736", "0.6999915", "0.6924702", "0.6525192", "0.6505699", "0.6490474", "0.64878106", "0.6456846", "0.6356905", "0.63506573", "0.6348926", "0.62902606", "0.6273074", "0.62722355", "0.6267735", "0.62005794", "0.6167228", "0.61156595", "0.6083813", "0.6070011", "0.606649...
0.72458094
0
Check if the algorithm has been run. It also return the trajectories and the ranking of the associated axis
def _check_run(self, axis): if (self.x_traj, self.y_traj)[axis] is None: if (self.inverse_x_traj, self.inverse_y_traj)[axis] is None: raise Exception('The algorithm has not been run.') else: if self.params['print_info']: print('Warning: you are using the opposite score. It can contain errors if any score is a zero below threshold.') return (self.inverse_x_traj, self.inverse_y_traj)[axis], (self.inverse_x_ranking, self.inverse_y_ranking)[axis] return (self.x_traj, self.y_traj)[axis], (self.x_ranking, self.y_ranking)[axis]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, axis, gamma):\n \n # Trajectories of the main score to compute and the opposite one\n traj_s, traj_o = [_np.ones(self.d[axis])], [_np.ones(self.d[1-axis])]\n # Ranked indices of the scores\n rank_s, rank_o = _np.array([], dtype=int), _np.array([], dtype=int)\n # ...
[ "0.57092583", "0.5609589", "0.5562583", "0.5543754", "0.54045916", "0.5378096", "0.5376567", "0.5344754", "0.53006", "0.52756476", "0.5219423", "0.51986915", "0.51444024", "0.51367295", "0.51324886", "0.50954634", "0.50765365", "0.506339", "0.50485265", "0.50454706", "0.50318...
0.7411444
0
Compute the extinction area of a binary matrix formatted as a "indexes_lists" (see "_get_index_lists"), given a ranking of the nodes and the axis (0 for row removing, 1 for column removing).
def _ext_area(axis, ranking, row_ind_at_col, col_ind_at_row): if axis == 0: indexes_lists = _np.array([col_ind_at_row[:], row_ind_at_col[:]]) else: indexes_lists = _np.array([row_ind_at_col[:], col_ind_at_row[:]]) if len(ranking) != len(indexes_lists[0]): print ('Dimensions do not match') return # Counting the already extincted columns. They are the ones whose list of # associated row indexes is empty. In that case the extinction counter is # increased and a -1 is added to the indexes list. ext_nodes = 0 for c in range(len(indexes_lists[1])): if len(indexes_lists[1][c]) == 0: ext_nodes += 1 indexes_lists[1][c] = _np.append(indexes_lists[1][c], -1) ext_curve = [ext_nodes] # Iteration over the ranked nodes to remove, r for r in ranking[:-1]: # Iter over the connected nodes in the other layer, r for c in indexes_lists[0][r]: # Removing the ranked node from the neighbours of c indexes_lists[1][c] = indexes_lists[1][c][indexes_lists[1][c] != r] # If the neighbours of c is empty, then c gets extincted if len(indexes_lists[1][c]) == 0: ext_nodes += 1 indexes_lists[1][c] = _np.append(indexes_lists[1][c], -1) ext_curve.append(ext_nodes) # Returning the area below the extinction curve return sum(ext_curve) / float(len(indexes_lists[0]) * len(indexes_lists[1]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def independendentColumns(matrix, rank, threshold):\n\n index = [None]*rank\n norms = [None]*rank\n\n if rank == 0:\n return index\n\n # select the first column\n index[0] = 0\n norms[0] = np.linalg.norm(matrix[:, 0])\n idx = 1\n\n if rank == 1:\n return index\n\n for i in rang...
[ "0.5215772", "0.4802194", "0.47601265", "0.47544992", "0.47510993", "0.4719506", "0.46831623", "0.46391895", "0.46330476", "0.46060845", "0.4589925", "0.4584294", "0.45521936", "0.45242146", "0.45011955", "0.4497215", "0.44914553", "0.44876465", "0.44790936", "0.44733527", "0...
0.75243336
0
Return an ndarray having row per element in data and one column
def make_data(self, data): return array(data, dtype=float32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(data):\n\n np_data = np.array(data)\n array = []\n\n for i in range(0, np_data.shape[1]):\n array.append(np_data[:, i])\n\n return np.array(array)", "def get_data(data):\n\n np_data = np.array(data)\n array = []\n\n for i in range(0, np_data.shape[1]):\n array.appe...
[ "0.7534442", "0.7534442", "0.70013255", "0.6785662", "0.6688916", "0.6645851", "0.6567787", "0.65319115", "0.6462152", "0.6382598", "0.63573694", "0.62963057", "0.6281688", "0.627003", "0.623539", "0.62131137", "0.61992896", "0.6181052", "0.6180269", "0.616434", "0.6149688", ...
0.573665
41
VGG16 construct for training backbone
def __init__(self, num_classes): super(VGG16, self).__init__() self.vgg16_feature_extractor = VGG16FeatureExtraction(weights_update=True) self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2) self.classifier = VGG16Classfier() self.fc3 = _fc(in_channels=4096, out_channels=num_classes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_vgg():\n input_shape = (256, 256, 3)\n\n vgg = keras.applications.VGG19(include_top = False , input_shape = input_shape , weights=\"imagenet\")\n features = vgg.get_layer(index = 9).output\n\n model = keras.Model(inputs=[vgg.inputs], outputs=[features])\n return model", "def vgg16(pretr...
[ "0.7914276", "0.7741606", "0.7713254", "0.7710423", "0.766079", "0.7593987", "0.75367546", "0.7439226", "0.7426811", "0.7363272", "0.7344508", "0.73206055", "0.7303242", "0.7245254", "0.72279394", "0.7205576", "0.71928155", "0.7183653", "0.71666634", "0.7141517", "0.7109456",...
0.7155615
19
This function helps find coordinates of parallel lines. It uses an orthogonal vector to work out offsets to calculate coordinates of lines parallel to (x1,y1) > (x2,y2), with a given magnitude
def offset(x1,y1,x2,y2,magnitude): norm = math.sqrt((y2-y1)**2 + (x1-x2)**2) / magnitude offset_x = (y2-y1)/norm offset_y = (x1-x2)/norm return offset_x, offset_y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def direction_coordinates(self, gc_lines):\n lins = [(_line[0][mid], _line[0][mid + 1], _line[1][mid], _line[1][mid + 1])\n for _line, mid in zip(gc_lines, [len(_line[0]) // 2 for _line in gc_lines])\n if len(_line[0]) > 2]\n lens = [np.hypot(_line[0][0] - _line[0][-1], ...
[ "0.62555534", "0.6251809", "0.62259203", "0.619858", "0.6193819", "0.6192046", "0.61712044", "0.61675787", "0.6118712", "0.61126447", "0.60862005", "0.60712665", "0.60499895", "0.6047329", "0.60366255", "0.602664", "0.59670717", "0.5939942", "0.59200275", "0.5916448", "0.5909...
0.6228341
2
This algorithm creates a cut list for a cut of depth z_thickness between (x1,y1)>(x2,y2).
def line(x1,y1,x2,y2,z_thickness,laser): #Global variables that are used by all algorithms layers = int(z_thickness/laser["z_spacing"]) #Works out offset when beginning on a new layer taper = math.tan(math.radians(laser["kerf_angle"]/2)) * laser["z_spacing"] taper_x,taper_y = offset(x1,y1,x2,y2,taper) #Works out offset between each parallel scan on the same layer delta_x,delta_y = offset(x1,y1,x2,y2,laser["xy_spacing"]) #Works out maximum offset from starting line, we don't want to exceed this at any point. max_taper = math.tan(math.radians(laser["kerf_angle"]/2)) * (z_thickness) * 2 max_delta_x, max_delta_y = offset(x1,y1,x2,y2,max_taper) #max_delta_x, max_delta_y = 2*max_delta_x, 2*max_delta_y #Loops through each layer, in which we fit as many parallel raster scans as the maximum offset allows cutlist = [] for a in range(layers): new_x1,new_x2,new_y1,new_y2 = x1 + a*taper_x, x2 + a*taper_x, y1 + a*taper_y, y2 + a*taper_y i = 0 cutlist.append(["z_step", str(-laser["z_spacing"])]) while abs(new_x1-x1) < abs(max_delta_x) or abs(new_y1-y1) < abs(max_delta_y): #This use of i is to reduce the jump distance between individual scans if i % 2 == 0: cutlist.append(["jump", f"{new_x1:.6f}", f"{new_y1:.6f}"]) cutlist.append(["mark", f"{new_x2:.6f}", f"{new_y2:.6f}"]) else: cutlist.append(["jump", f"{new_x2:.6f}", f"{new_y2:.6f}"]) cutlist.append(["mark", f"{new_x1:.6f}", f"{new_y1:.6f}"]) new_x1,new_x2,new_y1,new_y2 = new_x1 + delta_x, new_x2 + delta_x, new_y1 + delta_y, new_y2 + delta_y i = i + 1 #Having completed one layer, the laser moves down to begin the next layer max_delta_x = max_delta_x - taper_x cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"]) cutlist.append(["stop_trigger"]) return json.dumps(cutlist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pyramid_slice(x1,y1,x2,y2,z,delta,deltaz,taper_x,taper_y,taper_straight,layers):\r\n\tcutlist = []\r\n\ty_max = abs(y1-y2)\r\n\tfor a in range(layers):\r\n\t\ti = 0\r\n\t\tnew_x1, new_y1, new_x2, new_y2 = x1 - a*taper_x, y1-a*taper_straight, x2+a*taper_x, y2+a*taper_y\r\n\t\twhile abs(new_y1 - (y1 - a*taper_st...
[ "0.6802484", "0.639531", "0.570502", "0.55743366", "0.5564015", "0.54822946", "0.54609275", "0.54286385", "0.541968", "0.54144114", "0.5410887", "0.5384912", "0.5378323", "0.53586614", "0.53429186", "0.53311557", "0.53209656", "0.52847123", "0.52787846", "0.5276502", "0.52765...
0.555994
5
This algorithm returns a cutlist which describes a series of parallel lines, each with a different z value, to calibrate the z value for the laser.
def z_focus(block,cut,laser): cutlist = [] iterations = int(cut["final_dimension_z"]/laser["z_spacing"]) #Currently x,y is decided to take up a good amount of the block, rather than having set distances and sizes y = cut["final_dimension_y"]/2 offset = laser["xy_spacing"] x = 0 cutlist.append(["z_abs","0"]) for a in range(iterations): cutlist.append(["jump", f"{x:.6f}", f"{y:.6f}"]) cutlist.append(["mark", f"{x:.6f}", f"{-y:.6f}"]) cutlist.append(["z_rel", str(-laser["z_spacing"])]) x = x + offset cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"]) cutlist.append(["stop_trigger"]) return json.dumps(cutlist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497]...
[ "0.6449699", "0.6432063", "0.59427017", "0.5941269", "0.5936059", "0.5897232", "0.5840504", "0.5797004", "0.57667243", "0.57253015", "0.5718147", "0.5635782", "0.56259125", "0.5621187", "0.55818194", "0.5545392", "0.5544098", "0.5543384", "0.55397063", "0.553851", "0.55319935...
0.5417235
28
This algorithm returns a cutlist which performs a simple core operation. The laser runs race track style around the specified core, going around all 4 sides before the laser moves down to the next layer. The poly is expected to fall off the core at the end of the entire cutting operation.
def simple_core(block,cut,laser): layers = int(block["thickness"]/laser["z_spacing"]) #Since all cuts are square, the offsets are more obvious than in the general linear case. taper = math.tan(math.radians(laser["kerf_angle"]/2)) * laser["z_spacing"] max_delta = math.tan(math.radians(laser["kerf_angle"]/2)) * (block["thickness"] + laser["z_final_overshoot"]) * 2 cutlist = [] cutlist.append(["a_abs", "0"]) cutlist.append(["c_abs", str(block["physical_rotation"])]) cutlist.append(["z_abs", str(block["thickness"])]) for a in range(layers): x1, y1 = cut["final_dimension_x"]/2 + a*taper, cut["final_dimension_y"]/2 + a*taper while abs(x1-cut["final_dimension_x"]/2) < abs(max_delta): cutlist.append(["jump", str(x1 + block["origin_x"]), str(y1 + block["origin_y"])]) cutlist.append(["mark", str(x1 + block["origin_x"]), str(-y1 + block["origin_y"])]) cutlist.append(["mark", str(-x1 + block["origin_x"]), str(-y1 + block["origin_y"])]) cutlist.append(["mark", str(-x1 + block["origin_x"]), str(y1 + block["origin_y"])]) cutlist.append(["mark", str(x1 + block["origin_x"]), str(y1 + block["origin_y"])]) x1, y1 = x1 + laser["xy_spacing"], y1 + laser["xy_spacing"] cutlist.append(["z_step", str(-laser["z_spacing"])]) max_delta = max_delta - taper return json.dumps(cutlist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_traces_for_core(traces, traces_per_core, core_num):\n start = traces_per_core * core_num\n end = min(len(traces), traces_per_core * (core_num + 1))\n return traces[start:end]", "def minimize_core(s, core):\n for i in range(len(core)):\n new_core = core[:i] + core[i+1:]\n print \...
[ "0.5738572", "0.5688451", "0.547744", "0.52396184", "0.52205116", "0.51830083", "0.5107131", "0.50293916", "0.50252926", "0.5000583", "0.49980915", "0.49932697", "0.49482554", "0.49387234", "0.49366552", "0.49193186", "0.49054828", "0.48359147", "0.48309058", "0.48211256", "0...
0.5529602
2
This algorithm returns a cutlist which performs a vertical core operation. The laser cuts off one side of poly at a time, rotating the block such that the edge of the laser "cone" is parallel to the SCD core. After one side of the block has been removed, the block is rotated 90 degrees and the algorithm repeats until all 4 sides have been removed.
def vertical_core(block,cut,laser): layers = int(block["thickness"]/laser["z_spacing"]) angle = math.radians(laser["kerf_angle"]/2) taper = math.tan(angle) * laser["z_spacing"] u = math.tan(2 * angle) * (block["thickness"] + laser["z_final_overshoot"]) z_0 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_y"])/2 - block["origin_y"] + u) z_1 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_x"])/2 + block["origin_x"] + u) z_2 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_y"])/2 + block["origin_y"] + u) z_3 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_x"])/2 - block["origin_x"] + u) cutlist = [] cutlist.append(["a_abs", f"{math.degrees(angle):.6f}"]) cutlist.append(["c_abs", str(block["physical_rotation"])]) cutlist.append(["z_abs", f"{z_0:.6f}"]) y_start_wide = ((u + cut["final_dimension_x"]/2)* math.cos(angle) - block["thickness"]*math.sin(angle) - u/math.cos(angle)) y_start_length = ((u + cut["final_dimension_y"]/2)* math.cos(angle) - block["thickness"]*math.sin(angle) - u/math.cos(angle)) depth_cut = (block["thickness"] + laser["z_final_overshoot"]) * math.cos(angle)/math.cos(2*angle) cut1 = json.loads(line(block["width"]/2 - block["origin_x"],y_start_length - block["origin_y"],-block["width"]/2 - block["origin_x"],y_start_length - block["origin_y"],depth_cut,laser)) cut2 = json.loads(line(block["length"]/2 + block["origin_y"],y_start_wide - block["origin_x"],-block["length"]/2 + block["origin_y"],y_start_wide - block["origin_x"],depth_cut,laser)) cut3 = json.loads(line(block["width"]/2 + block["origin_x"],y_start_length + block["origin_y"],-block["width"]/2 + block["origin_x"],y_start_length + block["origin_y"],depth_cut,laser)) cut4 = json.loads(line(block["length"]/2 - block["origin_y"],y_start_wide + block["origin_x"],-block["length"]/2 - block["origin_y"],y_start_wide + block["origin_x"],depth_cut,laser)) #cut1 = 
json.loads(line(block["width"]/2,y_start_length,-block["width"]/2,y_start_length,depth_cut,laser)) #cut2 = json.loads(line(block["length"]/2,y_start_wide,-cut["final_dimension_y"]/2,y_start_wide,depth_cut,laser)) #cut3 = json.loads(line(block["width"]/2,y_start_length,-cut["final_dimension_x"]/2,y_start_length,depth_cut,laser)) #cut4 = json.loads(line(cut["final_dimension_y"]/2,y_start_wide,-cut["final_dimension_y"]/2,y_start_wide,depth_cut,laser)) cutlist = (cutlist + cut1 + [["c_rel", "90"],["z_abs", f"{z_1:.6f}"],] + cut2 + [["c_rel", "90"],["z_abs", f"{z_2:.6f}"]] + cut3 + [["z_abs", f"{z_3:.6f}"],["c_rel", "90"]] + cut4) cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"]) cutlist.append(["stop_trigger"]) return json.dumps(cutlist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetContourValuesLengthsAndSubContoursAndOrderOfSubContoursByFrame(\n watershed, allValsByFrame\n):\n scListByFrame, orderOfSCsByValueByFrame = GetSubContoursAndOrderingByFrame(\n watershed, allValsByFrame\n )\n cVLSByFrame = [[sc.cVLS() for sc in scList] for scList in scListByFrame]\n ret...
[ "0.5736482", "0.54933506", "0.5457592", "0.5425763", "0.5424649", "0.54244435", "0.5406228", "0.5358565", "0.53265375", "0.5317147", "0.5295013", "0.5287533", "0.5257076", "0.524541", "0.51966155", "0.51694214", "0.5141546", "0.51079696", "0.51050085", "0.5099054", "0.5085029...
0.6428014
0
This algorithm returns a cutlist which performs a cut which is a quarter of the total slicing required to create a pyramid top, while ensuring a flat bottom above it, both of which is required for an OG seed.
def pyramid_slice(x1,y1,x2,y2,z,delta,deltaz,taper_x,taper_y,taper_straight,layers): cutlist = [] y_max = abs(y1-y2) for a in range(layers): i = 0 new_x1, new_y1, new_x2, new_y2 = x1 - a*taper_x, y1-a*taper_straight, x2+a*taper_x, y2+a*taper_y while abs(new_y1 - (y1 - a*taper_straight)) < y_max and x1 > 0: if i % 2 == 0: cutlist.append(["jump", f"{new_x1:.6f}", f"{new_y1:.6f}"]) cutlist.append(["mark", f"{new_x2:.6f}", f"{new_y1:.6f}"]) else: cutlist.append(["jump", f"{new_x2:.6f}", f"{new_y1:.6f}"]) cutlist.append(["mark", f"{new_x1:.6f}", f"{new_y1:.6f}"]) new_y1 = new_y1-delta i = i + 1 if a < layers - 1: cutlist.append(["z_step", str(-deltaz)]) y_max = y_max - taper_straight - taper_y return cutlist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cuts(l, step, size):\n ncuts= (len(l)-size)/step + 1\n cuts= [None]*ncuts\n for i in xrange(ncuts): \n cuts[i]= l[i*step:i*step+size]\n if ncuts*step < len(l):\n cuts.append(l[ncuts*step:])\n return cuts", "def generate_possible_slices(L, H):\n n_min = 2 * L\n n_max = H...
[ "0.6510405", "0.6228084", "0.60608464", "0.60395974", "0.5939082", "0.59373015", "0.5816045", "0.5764991", "0.5731781", "0.5705467", "0.57034826", "0.57019746", "0.5698488", "0.56911135", "0.56729025", "0.5668522", "0.56589216", "0.5610545", "0.56084186", "0.5563644", "0.5558...
0.67580295
0
This algorithm returns a cutlist which performs OG slicing. It begins with an optional core, then cuts out slices until as many OG seeds as specified are removed from the block.
def oss_stacked(block, cut, laser): x0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut["final_dimension_x"]/2) x0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut["final_dimension_y"]/2) angle = math.radians(laser["kerf_angle"]/2) gap = math.tan(pyramid_angle_1) * (cut["final_dimension_x"]/2) + cut["gap_size"] unit_length = gap + cut["base_height"] max_slices = math.floor(block["thickness"]/unit_length) taper_straight = math.tan(angle)*(laser["z_spacing"]) if cut["core"] == "yes": cutlist = json.loads(vertical_core(block,cut,laser)) cutlist.pop() cutlist.pop(0) else: cutlist = [] a0 = -(90 + math.degrees(angle)) z_shift = (cut["base_height"] + gap) * math.sin(angle) x_shift = (cut["base_height"] + gap) * math.cos(angle) x_delta = math.sin(angle) * block["origin_x"] y_delta = math.sin(angle) * block["origin_y"] z1_delta = math.cos(angle) * block["origin_x"] z2_delta = math.cos(angle) * block["origin_y"] cutlist.append(["a_abs",f"{a0:.6f}"]) cutlist.append(["c_abs",str(block["physical_rotation"])]) cutlist.append(["z_abs",str(z0_1 + z2_delta)]) if pyramid_angle_1 >= angle and pyramid_angle_2 >= angle: if cut["num_of_seeds"] == "max": num_slices = max_slices else: num_slices = cut["num_of_seeds"] + 1 for i in range(num_slices): cutlist = (cutlist + pyramid_slice(cut["final_dimension_y"]/2 - block["origin_x"],x0_1 + y_delta,-cut["final_dimension_y"]/2 - block["origin_x"],x1_1 + y_delta,z0_1 + block["origin_y"],laser["xy_spacing"], laser["z_spacing"], taper_x_1,taper_y_1,taper_straight,layers_1) + [["z_abs",str(z0_2 + z1_delta)]] + [["c_abs","90"]] + pyramid_slice(cut["final_dimension_x"]/2 + block["origin_y"],x0_2 + x_delta,-cut["final_dimension_x"]/2 + block["origin_y"],x1_2 + x_delta,z0_2 + block["origin_x"],laser["xy_spacing"], laser["z_spacing"], taper_x_2,taper_y_2,taper_straight,layers_2) + [["z_abs",str(z0_1 - z2_delta)]] + [["c_abs","180"]] + 
pyramid_slice(cut["final_dimension_y"]/2 + block["origin_x"],x0_1 - y_delta,-cut["final_dimension_y"]/2 + block["origin_x"],x1_1 - y_delta,z0_1 - block["origin_y"],laser["xy_spacing"], laser["z_spacing"], taper_x_1,taper_y_1,taper_straight,layers_1) + [["z_abs",str(z0_2 - z1_delta)]] + [["c_abs","270"]] + pyramid_slice(cut["final_dimension_x"]/2 - block["origin_y"],x0_2 - x_delta,-cut["final_dimension_x"]/2 - block["origin_y"],x1_2 - x_delta,z0_2 - block["origin_x"],laser["xy_spacing"], laser["z_spacing"], taper_x_2,taper_y_2,taper_straight,layers_2) ) z0_1 = z0_1 + z_shift z0_2 = z0_2 + z_shift x0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift cutlist.append(["c_abs",str(block["physical_rotation"])]) cutlist.append(["z_abs",str(z0_1 + z2_delta)]) else: raise Exception("Pyramid angle too small") cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"]) cutlist.append(["stop_trigger"]) return json.dumps(cutlist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _iterative_cutting(g, p):\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n ...
[ "0.55150145", "0.5500191", "0.5455198", "0.5378894", "0.53707415", "0.5246031", "0.5238846", "0.5228055", "0.51568055", "0.51228464", "0.51081675", "0.51019835", "0.5090313", "0.5088919", "0.50825244", "0.5066888", "0.50596964", "0.5026915", "0.5015684", "0.5010477", "0.50010...
0.5538502
0
This algorithm takes a cutlist and returns an estimate for the time
def time_taken(json_cutlist, laser): cutlist = json.loads(json_cutlist) time = 0 coordinate_array = [0, 0] for a in cutlist: if a[0] == "jump" or a[0] == "mark": coordinate_array = [float(a[1]) - coordinate_array[0], float(a[2]) - coordinate_array[1]] mag = math.sqrt(coordinate_array[0]**2 + coordinate_array[1]**2) if a[0] == "jump": time += mag/laser["jump_speed"] else: time += mag/laser["mark_speed"] coordinate_array = [float(a[1]), float(a[2])] elif a[0] == "z_abs" or a[0] == "z_rel": zSet = float(a[1]) elif a[0] == "c_abs" or a[0] == "c_rel": cSet = float(a[1]) elif a[0] == "a_abs" or a[0] == "a_rel": aSet = float(a[1]) else: pass return str(datetime.timedelta(seconds=int(time)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hit_time(tmat,start_list,targ_list,ntraj = 1000, cutoff=1000):\n \n # get state names\n nstates = tmat.shape[0]\n states = array([ii for ii in range(nstates)])\n \n times = list()\n \n for ii in range(ntraj):\n curr = choice(start_list)\n tm = 0\n while curr not in ...
[ "0.627223", "0.5568012", "0.55149806", "0.542661", "0.54003626", "0.5371899", "0.5357149", "0.5351884", "0.5332691", "0.53161633", "0.52837616", "0.52747995", "0.5273911", "0.52718514", "0.52711356", "0.5226085", "0.5219191", "0.51971257", "0.51944643", "0.5190102", "0.518243...
0.6458329
0
This function takes a cut_configuration json object and calls the function corresponding to the desired cut, thereby returning the cutlist.
def generateCutList(cut_configuration): #Check that this line reads json.loads(cut_configuration) input_json = json.load(cut_configuration) #Currently only desired_cut and laser_cut_config are required try: block = input_json["block"] except: pass try: cut = input_json["desired_cut"] laser = input_json["laser_cut_config"] except: raise Exception("Either desired_cut or laser_cut_config not provided") if cut["cut_process"] == "line": final_list = line(cut["x1"],cut["y1"],cut["x2"],cut["y2"],cut["final_dimension_z"]+laser["z_final_overshoot"],laser) elif cut["cut_process"] == "simple_core": final_list = simple_core(block,cut,laser) elif cut["cut_process"] == "vertical_core": final_list = vertical_core(block,cut,laser) elif cut["cut_process"] == "oss_stacked": final_list = oss_stacked(block,cut,laser) elif cut["cut_process"] == "z_focus": final_list = z_focus(block,cut,laser) elif cut["cut_process"] == "cross": final_list = cross(block,cut,laser) else: raise Exception("No such cut exists: Check cut_process") #print(time_taken(final_list, laser)) now = datetime.now() timestamp = str(now.strftime("%m-%d_%H_%M")) complete_name = os.path.join(save_path, timestamp+".csv") with open(complete_name, mode='w',newline ='') as test_data: data_writer = csv.writer(test_data, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) list_data = json.loads(final_list) for line1 in list_data: data_writer.writerow(line1) return final_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadCutFolder(config):\n cutfolder = QFramework.TQFolder(\"__baseCut__\") #not a name any user should use but still sort of readable\n cutfolder.setTagString(\".cutExpression\",\"1.\") #pass all events, this is only a common handle for all actual (sub)cuts\n cutfolder.setTagBool(\".skipJobs\",True) #d...
[ "0.6149866", "0.5558365", "0.55202043", "0.52206475", "0.5109341", "0.49454144", "0.4927624", "0.49099135", "0.49094042", "0.48717844", "0.4865596", "0.4846522", "0.47562224", "0.471486", "0.47056", "0.46879908", "0.4677999", "0.46680018", "0.46643007", "0.4607456", "0.454606...
0.71448123
0
Get updated learning rate.
def get_lr(self): # HACK: We need to check if this is the first time ``self.get_lr()`` was called, # since ``torch.optim.lr_scheduler._LRScheduler`` will call ``self.get_lr()`` # when first initialized, but the learning rate should remain unchanged # for the first epoch. if not self._initialized: self._initialized = True return self.base_lrs step = self.last_epoch + 1 self._cycle_counter = step - self._last_restart lrs = [ self.eta_min + ((lr - self.eta_min) / 2) * ( np.cos( np.pi * (self._cycle_counter % self._updated_cycle_len) / self._updated_cycle_len ) + 1 ) for lr in self.base_lrs ] if self._cycle_counter % self._updated_cycle_len == 0: # Adjust the cycle length. self._cycle_factor *= self.factor self._cycle_counter = 0 self._updated_cycle_len = int(self._cycle_factor * self.t_max) self._last_restart = step return lrs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_learning_rate(self):\n config = self.config\n cur_lr = Train_model_pipeline.adjust_learning_rate(\n self.optimizer,\n self.epoch,\n config[\"training\"][\"learning_rate\"],\n decay=config[\"training\"][\"lr_decay_rate\"],\n step=config[\"...
[ "0.79935455", "0.79565597", "0.78638303", "0.76802355", "0.7612205", "0.7602973", "0.7580131", "0.74478555", "0.7442717", "0.74091333", "0.7381036", "0.7336032", "0.7336032", "0.7331142", "0.72541714", "0.72166234", "0.72166234", "0.71983415", "0.7194819", "0.70593596", "0.70...
0.61572826
75
check it has a solution
def has_solution(self) -> bool: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def did_solve(self) -> bool:\n pass", "def is_solved(self):\n if not self._find_empty():\n return True\n else:\n return False", "def did_solve(self):\n return self._solution[\"status\"] == \"optimal\"", "def has_solution(self) -> bool:\n if self in [se...
[ "0.7137686", "0.7136563", "0.7095094", "0.70714545", "0.7049462", "0.7029765", "0.6964778", "0.6960238", "0.6939422", "0.68836486", "0.6876838", "0.6864891", "0.68153703", "0.6815134", "0.6724094", "0.6690701", "0.66641307", "0.6656697", "0.6656075", "0.65969825", "0.65919304...
0.8243674
0
Define the menu layout
def get_html(self) -> List[ComponentMeta]: menu = dbc.DropdownMenu( children=[ dbc.DropdownMenuItem(_menu_settings["header"], header=True), dbc.DropdownMenuItem( _menu_settings["item-0"][0], _menu_settings["item-0"][1], external_link=True, target="_blank", ), dbc.DropdownMenuItem( _menu_settings["item-1"][0], _menu_settings["item-1"][1], external_link=True, target="_blank", ), ], in_navbar=True, label="Learn More", color="light", right=True, ) return [menu]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_menus( self ):", "def create_menu():", "def create_layout() -> None:\n\n st.sidebar.title(\"Menu\")\n app_mode = st.sidebar.selectbox(\"Please select a page\", [' I. Homepage',\n \"II. Download data\" ,\n ...
[ "0.7228054", "0.7096764", "0.70145935", "0.6930015", "0.6892374", "0.68760407", "0.67835313", "0.66314065", "0.66196865", "0.65105206", "0.6458277", "0.6445279", "0.6445256", "0.6438403", "0.64325565", "0.63978887", "0.6381949", "0.637196", "0.63675284", "0.6361078", "0.63495...
0.0
-1
Initialize the navigation bar
def get_html(self) -> List[ComponentMeta]: nav = dbc.Navbar( className="penn-medicine-header px-0", children=html.Div( className="d-flex align-items-center w-100", children=[ html.Div( className="px-3", style={"width": "320px"}, children=html.A( href="https://www.pennmedicine.org", className="penn-medicine-header__logo", title="Go to the Penn Medicine home page", ), ), html.Div( className="flex-fill", children=dbc.Container( children=[ dbc.NavbarBrand( children=html.H1( style={"font": "inherit", "margin": "0"}, children=_brand_text, ), href="/", ) ] + self.menu.component ), ), ], ), dark=True, fixed="top", color="", ) return [nav]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n super(QtMainWindow, self).initialize()\n self.update_menu_bar()", "def init_menu_bar(self):\n # Load from file action\n load_action = QtGui.QAction(QtGui.QIcon('open.png'), '&From File...', self)\n load_action.setShortcut('Ctrl+L')\n load_action.s...
[ "0.68171155", "0.6107692", "0.6052145", "0.60315436", "0.60225713", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6007548", "0.5998332", "0.59736603", "0.59092045", "0.5872221"...
0.0
-1
Reads Images in base directory DIR using 'classes' (computed from sub directories )
def preprocess_from_dir(DIR, classes=None, IMG_SIZE=(224,224), channels=3, per_class_size=None, normalize_train=False, mean_subtraction=None, isShuffle=True, save_data=False, destination_filename=None, verbose=1): return_classes_flag = False data = [] if not exists(DIR): raise ValueError('The specified directory does not exist') if IMG_SIZE is None: raise ValueError('IMG_SIZE must be specified') if not isinstance(IMG_SIZE, tuple) or len(IMG_SIZE) != 2: raise ValueError('IMG_SIZE must be a tuple of size 2 (width,height)') if verbose in [0,1]: if verbose == 0: display_count = False else: display_count = True else: raise ValueError('verbose flag must be either 1 (display progress to terminal) or 0 otherwise') if not isinstance(save_data, bool): raise ValueError('save_data must be a boolean (True/False)') if classes is None: return_classes_flag = True else: if not isinstance(classes, list): raise ValueError('"classes" must be a list') if save_data: if destination_filename is None: raise ValueError('Specify a destination file name') elif not ('.npy' in destination_filename or '.npz' in destination_filename): raise ValueError('Specify the correct numpy destination file extension (.npy or .npz)') if not save_data and destination_filename is not None: destination_filename = None # Loading from Numpy Files if destination_filename is not None and exists(destination_filename): print('[INFO] Loading from Numpy Files') since = time.time() data = np.load(destination_filename, allow_pickle=True) end = time.time() took = end - since print('----------------------------------------------') print(f'[INFO] Loaded in {took:.0f}s from Numpy Files') return data # Extracting image data and adding to `data` else: if destination_filename is not None: print(f'[INFO] Could not find {destination_filename}. Generating the training data') else: print('[INFO] Could not find a file to load from. 
Generating the training data') print('----------------------------------------------') # Starting timer since_preprocess = time.time() if classes is None: classes = get_classes_from_dir(DIR) # Removing false folders classes = _check_for_false_folders(DIR, classes) if per_class_size is None: per_class_size = len(listdir(minijoin(DIR, classes[0]), verbose=0)) if mean_subtraction is not None: # Checking if 'mean_subtraction' values are valid. Returns boolean value subtract_mean = _check_mean_sub_values(mean_subtraction, channels) for item in classes: class_path = minijoin(DIR, item) class_label = classes.index(item) count = 0 tens_list = list_images(class_path, use_fullpath=True, verbose=0) for image_path in tens_list: if count != per_class_size: # image_path = minijoin(class_path, image) # Returns the resized image (ignoring aspect ratio since it isn't relevant for Deep Computer Vision models) tens = imread(image_path, target_size=IMG_SIZE, rgb=True) if tens is None: continue # Gray if channels == 1: tens = to_gray(tens) # Normalizing if normalize_train: tens = normalize(tens) # Subtracting Mean # Mean must be calculated ONLY on the training set if mean_subtraction is not None and subtract_mean: mean_subtract = MeanProcess(mean_subtraction, channels) tens = mean_subtract.mean_preprocess(tens, channels) # Appending to train set data.append([tens, class_label]) count +=1 if display_count is True: _printTotal(count, item) else: break # Shuffling the Training Set if isShuffle is True: data = shuffle(data) # Converting to Numpy data = to_array(data) # Saves the Data set as a .npy file if save_data: #Converts to Numpy and saves if destination_filename.endswith('.npy'): print('[INFO] Saving as .npy file') elif destination_filename.endswith('.npz'): print('[INFO] Saving as .npz file') # Saving since = time.time() np.save(destination_filename, data) end = time.time() time_elapsed = end-since minu_elapsed = time_elapsed // 60 sec_elapsed = time_elapsed % 60 print(f'[INFO] 
{destination_filename} saved! Took {minu_elapsed:.0f}m {sec_elapsed:.0f}s') #Returns Training Set end_preprocess = time.time() time_elapsed_preprocess = end_preprocess - since_preprocess minu = time_elapsed_preprocess // 60 sec = time_elapsed_preprocess % 60 print('----------------------------------------------') print(f'[INFO] {len(data)} files preprocessed! Took {minu:.0f}m {sec:.0f}s') if return_classes_flag: return data, classes else: return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def _get_filenames_and_classes(dataset_dir):\n # print 'DATASET DIR:', dataset_dir\n # print 'subdir:', [name for name in os.list...
[ "0.7321848", "0.68896234", "0.6826946", "0.66698617", "0.6657406", "0.6633416", "0.65999025", "0.6553935", "0.6523276", "0.6521498", "0.64866954", "0.64619726", "0.6459432", "0.6443935", "0.6438473", "0.64238954", "0.64133865", "0.6406419", "0.6398897", "0.63965183", "0.63886...
0.5859994
92
Normalizes the data to mean 0 and standard deviation 1
def normalize(x, dtype='float32'): # x/=255.0 raises a TypeError # x = x/255.0 # Converting to float32 and normalizing (float32 saves memory) x = x.astype(dtype) / 255 return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std...
[ "0.8334346", "0.8145812", "0.8115416", "0.80993706", "0.80993706", "0.8077115", "0.7967708", "0.78944945", "0.78458416", "0.78052527", "0.7734523", "0.7707518", "0.770103", "0.76728106", "0.76322484", "0.75377613", "0.75027645", "0.74743336", "0.7383516", "0.7299807", "0.7280...
0.0
-1
Step O must be finished before step C can begin.
def convert_input_text(text): steps = defaultdict(list) predecessors = set() for line in text: regex = search(r"Step (.) must be finished before step (.) can begin.", line) # steps[step] = [list of predecessors] steps[regex.group(2)].append(regex.group(1)) predecessors.add(regex.group(1)) for key in predecessors - set(steps): steps[key] = [] return steps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_step(self) -> None:", "def _step(self) -> None:", "def _step(self):\n pass", "def step(self):\n #1. Time progresses\n self.time_operator.step()\n \n #2. Form and dissolve relationships\"\n self.relationship_operator.step()\n\n #3. HIV transmission\n ...
[ "0.63544285", "0.63411146", "0.6261979", "0.61237234", "0.59484553", "0.58107793", "0.5798005", "0.5778751", "0.57389134", "0.56745815", "0.56745815", "0.56745815", "0.5651764", "0.56497353", "0.5640354", "0.5634729", "0.5625408", "0.55929816", "0.55789346", "0.5559355", "0.5...
0.0
-1
Make sure that we can pop from the dictionary's value.
def test_remove_predecessors(): assert remove_predecessors({"A": ["B", "C"]}, "B") == {"A": ["C"]} assert remove_predecessors({"A": ["B", "C"]}, "D") == {"A": ["B", "C"]}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop(self, key, *args):\n return super(ReadOnlyDict, self).pop(key, *args) # pragma: no cover", "def popitem(self):\n return super(ReadOnlyDict, self).popitem()", "def pop(self, key):\n pass", "def remove_value(self, thing_key, dkey):\n if thing_key in self.things_dict:\n ...
[ "0.7131473", "0.7101309", "0.67752934", "0.67436886", "0.6718935", "0.6602139", "0.6528528", "0.64869654", "0.648454", "0.6478647", "0.6470268", "0.6445865", "0.64151543", "0.64034253", "0.6395446", "0.62999696", "0.6270913", "0.62059563", "0.6182744", "0.61714023", "0.616790...
0.0
-1
Compare the sort to the example.
def test_find_sequential_ordering(): example = { "C": [], "A": ["C"], "F": ["C"], "B": ["A"], "D": ["A"], "E": ["B", "D", "F"], } assert find_sequential_ordering(example) == "CABDFE"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sort_array(self):\r\n self.assertEqual(sort_array([6, 4, 9, 10]), [4, 6, 9, 10])", "def test_insertSort(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_1[0]),self.test_1[1])", "def test_sort(self):\n expected = [\n self.TDTT(when=self.dt_when ...
[ "0.67170554", "0.6628306", "0.6544598", "0.65049016", "0.64927745", "0.64916337", "0.64914834", "0.6488707", "0.64275587", "0.64019525", "0.6396425", "0.6384852", "0.6351696", "0.6299925", "0.6294106", "0.62695056", "0.62650406", "0.62421703", "0.6240233", "0.6208013", "0.620...
0.0
-1
Compare the duration to the example.
def test_find_parallel_duration(): pt2_example = { "C": [], "A": ["C"], "F": ["C"], "B": ["A"], "D": ["A"], "E": ["B", "D", "F"], } assert find_parallel_duration(pt2_example, 2, 0) == 15
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_duration(self):\n for duration_, _, _ in self.test_cases:\n self.assertEqual(Rest(duration_).duration, duration_)", "def test_duration_property(self):\n recording_dt = 0.1\n recording_shape = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n ...
[ "0.6993373", "0.64634085", "0.6261238", "0.6072691", "0.6072691", "0.60668623", "0.60171825", "0.58592576", "0.58530915", "0.58014023", "0.56802076", "0.5668855", "0.5660802", "0.55880255", "0.55880255", "0.55513406", "0.5545389", "0.5539826", "0.55272925", "0.55245674", "0.5...
0.5158579
75
obtain a time duration between the recent events of the same bizLocation
def obtain_time_duration(collection, new_document): # Obtain the previously existing two document for the incoming bizLocation # Sort them in descending order # The first in the list is the newly inserted document detected by Change Streams # the second document is of interest prev_documents = collection.find({'epcList.epc': new_document['epcList'][0]['epc']}).limit(2).sort([("eventTime", DESCENDING)]) if prev_documents is not None: # if there is a previous set of documents prev_doc_list = list(prev_documents) # print(prev_doc_list) if len(prev_doc_list) == 1: logger.info('Only Single entry exists for Product.. It implies it is the a new product with no previous events.') return None else: logger.debug('Previous BizLocation of Product: {}, Present BizLocation of Product: {}'.format( prev_doc_list[1]['bizLocation']['id'], new_document['bizLocation']['id'])) logger.debug('Time Duration: From {} to {}'.format(prev_doc_list[1]['eventTime'], new_document['eventTime'])) # make the dictionary to return duration = { 'bizLocation': { 'prev': prev_doc_list[1]['bizLocation']['id'], 'present': new_document['bizLocation']['id'] }, 'from_time': prev_doc_list[1]['eventTime'].isoformat(timespec='milliseconds') + 'Z', 'to_time': new_document['eventTime'].isoformat(timespec='milliseconds') + 'Z' } # print(duration) return duration else: logger.info('No Previous Information of Event Found') return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def duration(self):\r\n return self.t2 - self.t1", "def duration(self):\r\n return (self.end_time or time.time()) - self.start_time", "def duration(self):\n return self._end - self._begin", "def GetDuration(self):\n return _gmat_py.LocatedEvent_GetDuration(self)", "def compute_go_duration(s...
[ "0.6247451", "0.6066179", "0.59149534", "0.587591", "0.58055454", "0.57777333", "0.57665247", "0.5748646", "0.5732722", "0.572424", "0.571313", "0.5707432", "0.56868815", "0.5675801", "0.565217", "0.5647044", "0.56372106", "0.56028074", "0.5601279", "0.5599238", "0.5544639", ...
0.6420903
0
Objective function to maximize total score over matches.
def objective_score(me, other, turns, noise, repetitions, match_attributes=None): match = axl.Match((me, other), turns=turns, noise=noise, match_attributes=match_attributes) if not match._stochastic: repetitions = 1 scores_for_this_opponent = [] for _ in range(repetitions): match.play() scores_for_this_opponent.append(match.final_score_per_turn()[0]) return scores_for_this_opponent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life", "def j...
[ "0.6647923", "0.6647923", "0.6601076", "0.653043", "0.6438808", "0.63390124", "0.6311056", "0.62663805", "0.62656355", "0.6254761", "0.6223245", "0.62063557", "0.6162101", "0.61386317", "0.6135826", "0.61111224", "0.6108157", "0.61070305", "0.60964113", "0.608142", "0.607549"...
0.6370619
5
Objective function to maximize total score difference over matches.
def objective_score_diff(me, other, turns, noise, repetitions, match_attributes=None): match = axl.Match((me, other), turns=turns, noise=noise, match_attributes=match_attributes) if not match._stochastic: repetitions = 1 scores_for_this_opponent = [] for _ in range(repetitions): match.play() final_scores = match.final_score_per_turn() score_diff = final_scores[0] - final_scores[1] scores_for_this_opponent.append(score_diff) return scores_for_this_opponent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life", "def j...
[ "0.6714346", "0.6714346", "0.65570205", "0.64613044", "0.6316741", "0.62868387", "0.6236504", "0.62348974", "0.6215959", "0.6197578", "0.61927944", "0.61494", "0.61126816", "0.6108994", "0.61036104", "0.60992247", "0.60669684", "0.6046158", "0.60387737", "0.6037669", "0.60316...
0.6550279
3
Objective function to maximize Moran fixations over N=4 matches
def objective_moran_win(me, other, turns, noise, repetitions, N=5, match_attributes=None): population = [] for _ in range(N): population.append(me.clone()) population.append(other.clone()) mp = axl.MoranProcess(population, turns=turns, noise=noise) scores_for_this_opponent = [] for _ in range(repetitions): mp.reset() mp.play() if mp.winning_strategy_name == str(me): scores_for_this_opponent.append(1) else: scores_for_this_opponent.append(0) return scores_for_this_opponent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perform_mindmatch(\n A: np.array, n_trim: int = None,\n n_match: int = 6, cois: list = None\n):\n # setting distance in the diagonal\n A[np.arange(len(A)), np.arange(len(A))] = -1000 \n\n # if conflict of interest (COIs) is available, add to the matrix\n cois = [(c1, c2) for (c1, c2) in cois\...
[ "0.6166018", "0.582967", "0.58052844", "0.58043885", "0.569874", "0.5562046", "0.5560261", "0.55063146", "0.5504146", "0.54418087", "0.5436338", "0.543598", "0.54290164", "0.539945", "0.53978187", "0.53978187", "0.5395617", "0.5393824", "0.53581244", "0.5314676", "0.53036773"...
0.5245856
30
Return the overall mean score of a Player
def score_player(player, objective, opponents_information, weights=None, sample_count=None): scores_for_all_opponents = [] if sample_count is not None: indices = np.random.choice(len(opponents_information), sample_count) opponents_information = [opponents_information[i] for i in indices] if weights is not None: weights = [weights[i] for i in indices] for strategy, init_kwargs in opponents_information: player.reset() opponent = strategy(**init_kwargs) scores_for_this_opponent = objective(player, opponent) mean_vs_opponent = mean(scores_for_this_opponent) scores_for_all_opponents.append(mean_vs_opponent) overall_mean_score = np.average(scores_for_all_opponents, weights=weights) return overall_mean_score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)", "def getAverage(self):\n return sum(self.scores) / len(self.scores)", "def get_total_score(self):\n\n # Return the player's total score\n return self._total_score", "def calc_mean_score(movies: List[Mo...
[ "0.7505888", "0.7412792", "0.7344528", "0.72962356", "0.713673", "0.7091335", "0.7069279", "0.68148583", "0.68021953", "0.6720858", "0.6720858", "0.6720858", "0.6719883", "0.670838", "0.6703316", "0.66747105", "0.66253746", "0.66124016", "0.65976983", "0.6594148", "0.65919864...
0.61413795
86
Load the best num parameters from the given file.
def load_params(player_class, filename, num): parser = player_class.deserialize_parameters all_params = [] with open(filename) as datafile: reader = csv.reader(datafile) for line in reader: score, rep = float(line[-2]), line[-1] all_params.append((score, rep)) all_params.sort(reverse=True) best_params = [] for score, rep in all_params[:num]: best_params.append(parser(rep)) return best_params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadBestFit(self):\n bestFit, err = [], []\n row = 0\n with open(self.filename) as f:\n for lines in f.readlines():\n line = lines.strip(\"\\n\")\n data = line.split(\" \")\n if row == self.Npar:\n for kk in range(s...
[ "0.6628179", "0.6617545", "0.6580962", "0.64695466", "0.64075434", "0.62310404", "0.6230305", "0.62199324", "0.61862564", "0.61166227", "0.6109736", "0.6079469", "0.5998742", "0.5996682", "0.5978318", "0.59457254", "0.5927295", "0.5894868", "0.5883919", "0.58676887", "0.58008...
0.73936313
0
Update UserCreate instance firstname
def update_firstname(state: UserCreate, firstname: str) -> None: state.name.first = firstname state.slug = slugify(f"super-user: {state.name.first} {state.name.last}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n\n return user", "def first_name(self, instance):\r\n return instance.user.first_name", "def register_user_first_name(self, me...
[ "0.7218886", "0.72107047", "0.7091783", "0.70805126", "0.68391097", "0.6802234", "0.6792022", "0.6786857", "0.668164", "0.66436106", "0.6567716", "0.65634876", "0.65209407", "0.64976525", "0.64976525", "0.6494254", "0.6456594", "0.6450713", "0.642974", "0.6408176", "0.6386372...
0.81902975
0
Build User response instance
def build_user(data: Dict[Any, Any]) -> User: return User(**data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get():\n return prepare_response(get_user_info())", "def get_user_details(self, response):\n fullname, first_name, last_name = self.get_user_names(\n response.get(\"fullName\"),\n response.get(\"firstName\"),\n response.get(\"lastName\"),\n )\n ret...
[ "0.6529011", "0.6426592", "0.6373598", "0.63219744", "0.61717933", "0.6141898", "0.61418366", "0.6088162", "0.6084343", "0.6070999", "0.60210335", "0.6011697", "0.60002035", "0.5984306", "0.5978686", "0.59628445", "0.59532416", "0.5891522", "0.5889954", "0.58876634", "0.58819...
0.6766939
0
This methods runs one episode for a gym environment. deterministic == True => agent executes only greedy actions according the Q function approximator (no random actions). do_training == True => train agent
def run_episode(env, agent, deterministic, skip_frames=0, do_training=True, rendering=True, max_timesteps=10000, history_length=0, manual=False): stats = utils.EpisodeStats() # Save history image_hist = [] step = 0 state = env.reset() env.viewer.window.on_key_press = utils.key_press env.viewer.window.on_key_release = utils.key_release # fix bug of corrupted states without rendering in gym environment env.viewer.window.dispatch_events() # append image history to first state state = state_preprocessing(state) image_hist.extend([state] * (history_length + 1)) state = np.array(image_hist).reshape(96, 96, history_length + 1) while True: #skip intro zoom frames if step < 48: step += 1 env.step(utils.id_to_action(0)) continue # TODO: get action_id from agent # Hint: adapt the probabilities of the 5 actions for random sampling so that the agent explores properly. if do_training and manual: action_id = utils.manual_action else: action_id = agent.act(state, deterministic) action = utils.id_to_action(action_id) # Hint: frame skipping might help you to get better results. reward = 0 for _ in range(skip_frames + 1): next_state, r, terminal, info = env.step(action) reward += r if rendering: env.render() if terminal: break next_state = state_preprocessing(next_state) image_hist.append(next_state) image_hist.pop(0) next_state = np.array(image_hist).reshape(96, 96, history_length + 1) if do_training and (next_state[:82, :, -1].sum() > 5000): #track out of sight print('Track gone; finish this episode') agent.add(state, action_id, next_state, reward=-(skip_frames + 1), terminal=True) #punish break if do_training: agent.add(state, action_id, next_state, reward, terminal) if not manual: agent.train() stats.step(reward, action_id) state = next_state if terminal or (step * (skip_frames + 1)) > max_timesteps: break step += 1 return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _train_simulate(self, env, train_episode=None):\n # The initial observation\n o_r_d_i = [env.reset()] + [None]*3 # o_r_d_i means \"Observation_Reward_Done_Info\"\n # Reset all the manager parameters\n self.reset(o_r_d_i[0][\"manager\"])\n done = False\n current_option...
[ "0.75989807", "0.7317887", "0.731042", "0.72842157", "0.72729677", "0.7271064", "0.71605587", "0.70899516", "0.70623356", "0.70282584", "0.70133144", "0.69955355", "0.6992601", "0.69857806", "0.6981672", "0.69651085", "0.69529545", "0.6945286", "0.6932808", "0.69287926", "0.6...
0.7439519
1
Split the course string in to a course number and a course postfix Expects all strings to be in the format numpostfix. For instance,
def _split_course_string(course_string): course_num = '' course_postfix = '' count = 0 for indx, char in enumerate(course_string): if not char.isdigit(): break course_num += char count += 1 try: course_num = int(course_num) except ValueError: logger.exception('Got an invalid course string: %s', course_string) raise InvalidCourseStringError(course_string) course_postfix = course_string[count:] return course_num, course_postfix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def space_coursecodes(input_string):\n\n coursecode_pattern = re.compile(r\"[A-Z]{2,6}\\s{,1}-{,1}/{,1}[0-9]{2,6}-{,1}/{,1}([0-9]{2,6}){,1}\")\n\n # for any coursecodes in string, find beginning and end index of each\n ind_pairs = [(m.start(0), m.end(0)) for m in re.finditer(coursecode_pattern, input_stri...
[ "0.59728956", "0.5590606", "0.5450177", "0.533765", "0.5325984", "0.51580155", "0.5109719", "0.5105101", "0.5051153", "0.5044799", "0.49115965", "0.48173293", "0.48144338", "0.48039567", "0.47931984", "0.47742018", "0.4764439", "0.47197244", "0.47027755", "0.46838984", "0.462...
0.7670243
0
Add a new column named placeholder fill with the arg_1 value
def main(dataframe: pd.DataFrame, arg_1: str='nothing') -> pd.DataFrame: dataframe["placeholder"] = arg_1 return dataframe
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_column(df, colTitle, colIndex, fillValue):\n if colTitle not in df.columns:\n df.insert(colIndex, colTitle, fillValue, True)\n return df", "def add_column_parameter(params, name, dataset, args, key):\n column_id = args.get_value(key, raise_error=False)\n if column_id is None:\n ...
[ "0.61651623", "0.60531425", "0.5932682", "0.55233854", "0.53711224", "0.5348192", "0.5308065", "0.52713895", "0.5266658", "0.51256454", "0.5117017", "0.51092947", "0.510111", "0.5097036", "0.5096841", "0.50488573", "0.50472313", "0.50351626", "0.50323486", "0.5029519", "0.502...
0.6702973
0
Insert a new node.
def insert(self, val): node = Node(val) current = self.root if self.root is None: self.root = node return node while current: if val >= current.val: if current.right is not None: current = current.right else: current.right = node break elif val < current.val: if current.left is not None: current = current.left else: current.left = node break return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __insert(self, node, value):\n #if DEBUG: print('\\t__insert({})'.format(value))\n\n new = Node(value, node.next)\n node.next = new\n return new", "def insert_node(self, data):\n\t\tif self.root is None:\n\t\t\tself.root = Node(data)\n\t\telse:\n\t\t\tcurrent_node = self.root\n\t\...
[ "0.7825037", "0.7765275", "0.7721343", "0.7457422", "0.74511695", "0.74140745", "0.73851484", "0.7339789", "0.7339789", "0.73216206", "0.73207855", "0.7279264", "0.72762483", "0.72737384", "0.72505534", "0.7235147", "0.7235147", "0.7208238", "0.719422", "0.71857184", "0.71846...
0.0
-1
Endpoint qui permet d'envoyer la convention de partenariat par mail pour une perm d'id {id}.
def send_convention(request, id): perm = perm_models.Perm.objects.get(pk=id) convention_template = get_template('convention_partenariat.html') convention_context = { 'perm': perm, 'articles': perm.get_convention_information()['perm_articles'], 'montant': round(perm.get_montant_deco_max(), 2), 'mail': True, } context_content = convention_template.render(convention_context) send_mail('Convention Perm Pic\'Asso', 'Pour lire ce message, merci d\'utiliser un navigateur ou un client mail compatible HTML.', DEFAULT_FROM_EMAIL, [perm.mail_resp], html_message=context_content) return Response(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def envoi_par_mail(self):\n cr , uid, context = self.env.args\n if not self.pool['res.users'].has_group(cr, uid, 'is_plastigray.is_comptable_group'):\n raise Warning(u\"Accès non autorisé !\")\n ids=[]\n for obj in self:\n ids.append(str(obj.id))\n if len(id...
[ "0.6005149", "0.5587609", "0.5556423", "0.5496287", "0.52939004", "0.5288747", "0.52882147", "0.52684224", "0.52666515", "0.5243778", "0.5187994", "0.5171401", "0.51496404", "0.5147281", "0.5110905", "0.50985926", "0.5092945", "0.50828236", "0.50735444", "0.5072316", "0.50681...
0.6470842
0
Endpoint qui permet d'envoyer le justificatif de paiement par mail pour une perm d'id {id}.
def send_justificatif(request, id): perm = perm_models.Perm.objects.get(pk=id) info = perm.get_justificatif_information() justificatif_template = get_template('justificatif_paiement.html') justificatif_context = { 'perm': perm, 'articles': info['perm_articles'], 'total_ht': info['total_ht'], 'total_ttc': info['total_ttc'], 'tva_amounts': info['tva_amounts'], 'mail': True, } context_content = justificatif_template.render(justificatif_context) send_mail('Justificatif paiement Pic\'Asso', 'Pour lire ce message, merci d\'utiliser un navigateur ou un client mail compatible HTML.', DEFAULT_FROM_EMAIL, [perm.mail_resp], html_message=context_content) return Response(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_convention(request, id):\n perm = perm_models.Perm.objects.get(pk=id)\n convention_template = get_template('convention_partenariat.html')\n convention_context = {\n 'perm': perm,\n 'articles': perm.get_convention_information()['perm_articles'],\n 'montant': round(perm.get_montant_d...
[ "0.57655394", "0.5716651", "0.56478196", "0.56307864", "0.55586624", "0.5510274", "0.54520583", "0.54152286", "0.5387207", "0.5373411", "0.5325026", "0.53217953", "0.529948", "0.52949625", "0.52771795", "0.5257251", "0.5247731", "0.5247631", "0.52378786", "0.52317333", "0.522...
0.63593924
0
Build the right part of the fireball function.
def build_rightpart(): # build in 1: (K dec) apply_card("put", 1) apply_slot(1, "dec") apply_card("K", 1) # build in 0: greg build_greg(0) # smash together to get (greg (K dec)) in 0 smash() # copy it to 1. apply_card("put", 1) apply_slot(1, "zero") apply_card("get", 1) # build horace in 0. build_horace(0) # smash together to get (horace (greg (K dec))) in 0. smash() # Wrap with an S. apply_card("S", 0) # build ian in 1. build_ian(1) # smash together to get ((S (horace (greg (K dec)))) ian) in 0. smash()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_fireball():\n # build the right part\n build_rightpart()\n\n # copy it to 4.\n copy(0, 4)\n\n # build the left part, now it's in 0\n build_leftpart()\n\n # copy right part from 4 to 1.\n copy(4, 1)\n # smash together for whole fireball.\n smash()", "def build_wall(): #py:...
[ "0.7823686", "0.5754557", "0.56262046", "0.5477109", "0.5417354", "0.53973466", "0.5374712", "0.53535587", "0.53477806", "0.5309742", "0.52957034", "0.5281329", "0.52798027", "0.5256859", "0.5233532", "0.5210926", "0.519959", "0.51973045", "0.51967686", "0.5172551", "0.516020...
0.0
-1
Build the left part of the fireball function. Doing this uses slots 0,1,2,3 and the result will be in slot 0. (S (horace ((S (horace ((S (horace (greg (K S)))) ((S (horace fanny)) (greg I))))) june ((S (horace fanny)) ian) ))) kelly
def build_leftpart(): # build kelly. build_kelly() # copy kelly to 3. copy(0, 3) # build june in slots 0,1,2 build_june() # copy kelly to slot 1 copy(3, 1) # smash together to get (june kelly) in 0 smash() # copy (june kelly) to 1 copy(0, 1) # build horace in 0 build_horace(0) # smash together to get (horace (june kelly)) in 0 smash() # wrap with an S for the whole left part. apply_card("S", 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_fireball():\n # build the right part\n build_rightpart()\n\n # copy it to 4.\n copy(0, 4)\n\n # build the left part, now it's in 0\n build_leftpart()\n\n # copy right part from 4 to 1.\n copy(4, 1)\n # smash together for whole fireball.\n smash()", "def keyLeft(self):\n ...
[ "0.70482737", "0.58627445", "0.58096784", "0.58096784", "0.5768745", "0.5619706", "0.55908644", "0.55596286", "0.5548311", "0.5537584", "0.55325747", "0.5531275", "0.5530027", "0.55278164", "0.54952", "0.5482912", "0.54783756", "0.54747903", "0.5452353", "0.5442784", "0.54378...
0.7919818
0
Build the fireball function. We'll apply the Y combinator to it. Stomps registers [0,4].
def build_fireball(): # build the right part build_rightpart() # copy it to 4. copy(0, 4) # build the left part, now it's in 0 build_leftpart() # copy right part from 4 to 1. copy(4, 1) # smash together for whole fireball. smash()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fy(self):\n\n def fy(p):\n p0, p1 = p\n e = anp.exp(-(p0 + p1) * self.ts)\n x = (\n 1\n / (-p0 - p1)\n * anp.array(\n [\n [-p1 - p0 * e, -p1 + p1 * e],\n [-p...
[ "0.57334554", "0.54655373", "0.53826815", "0.53662187", "0.53366727", "0.5298539", "0.5281986", "0.52551913", "0.52137053", "0.520962", "0.5203715", "0.51821357", "0.51619667", "0.5104895", "0.51000744", "0.5096226", "0.5089348", "0.50706804", "0.50680226", "0.5058774", "0.50...
0.68100685
0
Initialize the Route analysis for the given inputs.
def __init__(self, **kwargs): self.pair_type = kwargs["pair_type"] self.origins = kwargs["origins"] self.origin_id_field = kwargs["origin_id_field"] self.destinations = kwargs["destinations"] self.dest_id_field = kwargs["dest_id_field"] self.network_data_source = kwargs["network_data_source"] self.travel_mode = kwargs["travel_mode"] self.time_units = kwargs["time_units"] self.distance_units = kwargs["distance_units"] self.time_of_day = kwargs["time_of_day"] self.reverse_direction = kwargs["reverse_direction"] self.scratch_folder = kwargs["scratch_folder"] self.assigned_dest_field = kwargs["assigned_dest_field"] self.od_pair_table = kwargs["od_pair_table"] self.origin_transfer_fields = kwargs["origin_transfer_fields"] self.destination_transfer_fields = kwargs["destination_transfer_fields"] self.barriers = [] if "barriers" in kwargs: self.barriers = kwargs["barriers"] # Create a job ID and a folder for this job self._create_job_folder() # Setup the class logger. Logs for each parallel process are not written to the console but instead to a # process-specific log file. 
self.setup_logger("RoutePairs") # Get field objects for the origin and destination ID fields since we need this in multiple places self.origin_id_field_obj = arcpy.ListFields(self.origins, wild_card=self.origin_id_field)[0] self.dest_id_field_obj = arcpy.ListFields(self.destinations, wild_card=self.dest_id_field)[0] # Set up other instance attributes self.is_service = helpers.is_nds_service(self.network_data_source) self.rt_solver = None self.solve_result = None self.input_origins_layer = "InputOrigins" + self.job_id self.input_destinations_layer = "InputDestinations" + self.job_id self.input_origins_layer_obj = None self.input_dests_layer_obj = None self.origin_unique_id_field_name = "OriginUniqueID" self.dest_unique_id_field_name = "DestinationUniqueID" self.od_pairs = None # Create a network dataset layer if needed if not self.is_service: self._make_nds_layer() # Prepare a dictionary to store info about the analysis results self.job_result = { "jobId": self.job_id, "jobFolder": self.job_folder, "solveSucceeded": False, "solveMessages": "", "outputRoutes": "", "logFile": self.log_file }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def initialize_rt_solver(self):\r\n # For a local network dataset, we need to checkout the Network Analyst extension license.\r\n...
[ "0.6117074", "0.59076273", "0.5906306", "0.58633006", "0.5775825", "0.5771856", "0.57236266", "0.5721005", "0.57191586", "0.5712019", "0.5708202", "0.56965554", "0.56650615", "0.56602186", "0.5631149", "0.56289285", "0.56277144", "0.56277144", "0.56152123", "0.5613303", "0.56...
0.0
-1
Initialize a Route solver object and set properties.
def initialize_rt_solver(self): # For a local network dataset, we need to checkout the Network Analyst extension license. if not self.is_service: arcpy.CheckOutExtension("network") # Create a new Route object self.logger.debug("Creating Route object...") self.rt_solver = arcpy.nax.Route(self.network_data_source) # Set the Route analysis properties. # Read properties from the rt_config.py config file for all properties not set in the UI as parameters. # Route properties documentation: https://pro.arcgis.com/en/pro-app/latest/arcpy/network-analyst/route.htm # The properties have been extracted to the config file to make them easier to find and set so users don't have # to dig through the code to change them. self.logger.debug("Setting Route analysis properties from RT config file...") for prop, value in RT_PROPS.items(): if prop in RT_PROPS_SET_BY_TOOL: self.logger.warning(( f"Route config file property {prop} is handled explicitly by the tool parameters and will be " "ignored." )) continue try: setattr(self.rt_solver, prop, value) if hasattr(value, "name"): self.logger.debug(f"{prop}: {value.name}") else: self.logger.debug(f"{prop}: {value}") except Exception as ex: # pylint: disable=broad-except # Suppress warnings for older services (pre 11.0) that don't support locate settings and services # that don't support accumulating attributes because we don't want the tool to always throw a warning. if not (self.is_service and prop in [ "searchTolerance", "searchToleranceUnits", "accumulateAttributeNames" ]): self.logger.warning( f"Failed to set property {prop} from RT config file. 
Default will be used instead.") self.logger.warning(str(ex)) # Set properties explicitly specified in the tool UI as arguments self.logger.debug("Setting Route analysis properties specified tool inputs...") self.rt_solver.travelMode = self.travel_mode self.logger.debug(f"travelMode: {self.travel_mode}") self.rt_solver.timeUnits = self.time_units self.logger.debug(f"timeUnits: {self.time_units}") self.rt_solver.distanceUnits = self.distance_units self.logger.debug(f"distanceUnits: {self.distance_units}") self.rt_solver.timeOfDay = self.time_of_day self.logger.debug(f"timeOfDay: {self.time_of_day}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.RRTFamilySolver = RRTFamilyPathPlanner()\n self.PRMSolver = PRMPathPlanner()", "def __init__(self):\n\n # Set a node name - something relevant\n rospy.init_node('waypoint_updater')\n\n # Most recent pose\n self.pose = None\n\n # Map wayp...
[ "0.70928293", "0.6806993", "0.67323405", "0.6620287", "0.65866923", "0.6567693", "0.6503277", "0.6323346", "0.6312729", "0.62945217", "0.6200703", "0.6181314", "0.61419207", "0.6131141", "0.6076377", "0.6046717", "0.6043842", "0.59901327", "0.59900606", "0.5955468", "0.594591...
0.75314903
0
Add fields to input Stops with the origin and destination's original unique IDs.
def _add_unique_id_fields(self): field_types = {"String": "TEXT", "Single": "FLOAT", "Double": "DOUBLE", "SmallInteger": "SHORT", "Integer": "LONG", "OID": "LONG"} origin_field_def = [self.origin_unique_id_field_name, field_types[self.origin_id_field_obj.type]] if self.origin_id_field_obj.type == "String": origin_field_def += [self.origin_unique_id_field_name, self.origin_id_field_obj.length] dest_field_def = [self.dest_unique_id_field_name, field_types[self.dest_id_field_obj.type]] if self.dest_id_field_obj.type == "String": dest_field_def += [self.dest_unique_id_field_name, self.dest_id_field_obj.length] self.rt_solver.addFields(arcpy.nax.RouteInputDataType.Stops, [origin_field_def, dest_field_def])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _insert_stops_one_to_one(self): # pylint: disable=too-many-locals\r\n # Use an insertCursor to insert Stops into the Route analysis\r\n destinations = {}\r\n destination_rows = []\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n ...
[ "0.6808154", "0.65527296", "0.6456082", "0.5107205", "0.4909032", "0.4837304", "0.48278338", "0.47855443", "0.4784247", "0.47640702", "0.47625896", "0.46876457", "0.4668027", "0.46508414", "0.46428096", "0.46241915", "0.45510978", "0.45495996", "0.45336407", "0.45158657", "0....
0.7586507
0
Create layers from the origins so the layer contains only the desired inputs for the chunk.
def _select_inputs_one_to_one(self, origins_criteria): # Select the origins with ObjectIDs in this range self.logger.debug("Selecting origins for this chunk...") origins_oid_field_name = arcpy.Describe(self.origins).oidFieldName origins_where_clause = ( f"{origins_oid_field_name} >= {origins_criteria[0]} " f"And {origins_oid_field_name} <= {origins_criteria[1]}" ) self.logger.debug(f"Origins where clause: {origins_where_clause}") self.input_origins_layer_obj = helpers.run_gp_tool( self.logger, arcpy.management.MakeFeatureLayer, [self.origins, self.input_origins_layer, origins_where_clause], ).getOutput(0) num_origins = int(arcpy.management.GetCount(self.input_origins_layer_obj).getOutput(0)) self.logger.debug(f"Number of origins selected: {num_origins}") # Make a layer for destinations for quicker access helpers.run_gp_tool( self.logger, arcpy.management.MakeFeatureLayer, [self.destinations, self.input_destinations_layer], )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_layers(self):\n raise NotImplementedError", "def _make_layer(self, X, name, block, num_blocks, out_channels):\n\n for i in range(0, num_blocks):\n X = block(X, name = name + '_block{}'.format(i), out_channels=out_channels)\n return X", "def _build(self, inputs):\n\n ...
[ "0.58452713", "0.57073355", "0.5678293", "0.567758", "0.566787", "0.5503139", "0.54909784", "0.5411151", "0.53651917", "0.5347449", "0.53288347", "0.5283121", "0.52772504", "0.52497953", "0.5245188", "0.5234502", "0.52329576", "0.5224384", "0.51927054", "0.51888996", "0.51857...
0.5049388
37
Retrieve a list of OD pairs included in this chunk.
def _get_od_pairs_for_chunk(self, chunk_definition): # Read the relevant rows from the CSV chunk_num, chunk_size = chunk_definition # Explicitly set data types dtypes = { 0: helpers.PD_FIELD_TYPES[self.origin_id_field_obj.type], 1: helpers.PD_FIELD_TYPES[self.dest_id_field_obj.type] } df_od_pairs = pd.read_csv( self.od_pair_table, header=None, skiprows=chunk_size*chunk_num, nrows=chunk_size, dtype=dtypes ) self.od_pairs = df_od_pairs.values.tolist()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pairs(self):\n return self.pairs", "def pairs(self):\n return self.items() if self.is_a(dict) else self.chunks(2)", "def pairs(self):\n return self.__pairs", "def getOrderList(self):\r\n\t\treturn self.pair.orders", "async def fetch_trading_pairs() -> List[str]:\n raise NotI...
[ "0.61472523", "0.58992565", "0.5761564", "0.567055", "0.5611011", "0.55578417", "0.5545772", "0.5545772", "0.5542505", "0.55348593", "0.5519136", "0.55052334", "0.5441358", "0.5352025", "0.53083766", "0.52903664", "0.5279547", "0.5259124", "0.5257976", "0.52337116", "0.521395...
0.6435109
0
Create layers that include only the origins and destinations relevant to this chunk.
def _select_inputs_many_to_many(self): # Select the origins present in this chunk of predefined OD pairs self.logger.debug("Selecting origins for this chunk...") origins_in_chunk = set([pair[0] for pair in self.od_pairs]) if isinstance(self.od_pairs[0][0], (int, float,)): origin_string = ", ".join([str(o_id) for o_id in origins_in_chunk]) else: origin_string = "'" + "', '".join([str(o_id) for o_id in origins_in_chunk]) + "'" origins_where_clause = f"{self.origin_id_field} IN ({origin_string})" self.logger.debug(f"Origins where clause: {origins_where_clause}") self.input_origins_layer_obj = helpers.run_gp_tool( self.logger, arcpy.management.MakeFeatureLayer, [self.origins, self.input_origins_layer, origins_where_clause] ).getOutput(0) num_origins = int(arcpy.management.GetCount(self.input_origins_layer).getOutput(0)) self.logger.debug(f"Number of origins selected: {num_origins}") # Select the destinations present in this chunk of predefined OD pairs self.logger.debug("Selecting destinations for this chunk...") dests_in_chunk = set([pair[1] for pair in self.od_pairs]) if isinstance(self.od_pairs[0][1], (int, float,)): dest_string = ", ".join([str(d_id) for d_id in dests_in_chunk]) else: dest_string = "'" + "', '".join([str(d_id) for d_id in dests_in_chunk]) + "'" dests_where_clause = f"{self.dest_id_field} IN ({dest_string})" self.logger.debug(f"Destinations where clause: {dests_where_clause}") self.input_dests_layer_obj = helpers.run_gp_tool( self.logger, arcpy.management.MakeFeatureLayer, [self.destinations, self.input_destinations_layer, dests_where_clause] ).getOutput(0) num_dests = int(arcpy.management.GetCount(self.input_destinations_layer).getOutput(0)) self.logger.debug(f"Number of destinations selected: {num_dests}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SC...
[ "0.61868954", "0.61661166", "0.60497373", "0.5842511", "0.5815913", "0.5705256", "0.5667303", "0.5603573", "0.5603573", "0.54255944", "0.5378894", "0.53700405", "0.53343993", "0.5305683", "0.528266", "0.5267994", "0.5221907", "0.52087885", "0.5189929", "0.51773155", "0.517675...
0.0
-1
Insert the origins and destinations as Stops for the Route analysis for the onetoone case.
def _insert_stops_one_to_one(self): # pylint: disable=too-many-locals # Use an insertCursor to insert Stops into the Route analysis destinations = {} destination_rows = [] with self.rt_solver.insertCursor( arcpy.nax.RouteInputDataType.Stops, ["RouteName", "Sequence", self.origin_unique_id_field_name, "SHAPE@", self.dest_unique_id_field_name] + self.origin_transfer_fields ) as icur: # Loop through origins and insert them into Stops along with their assigned destinations for origin in arcpy.da.SearchCursor( # pylint: disable=no-member self.input_origins_layer, ["SHAPE@", self.origin_id_field, self.assigned_dest_field] + self.origin_transfer_fields ): dest_id = origin[2] if dest_id is None: continue if dest_id not in destinations: dest_val = f"'{dest_id}'" if isinstance(dest_id, str) else dest_id with arcpy.da.SearchCursor( # pylint: disable=no-member self.input_destinations_layer, ["SHAPE@", self.dest_id_field] + self.destination_transfer_fields, where_clause=f"{self.dest_id_field} = {dest_val}" ) as cur: try: destinations[dest_id] = next(cur) except StopIteration: # The origin's destination is not present in the destinations table. Just skip the origin. 
continue # Insert origin and destination destination = destinations[dest_id] if self.reverse_direction: route_name = f"{dest_id} - {origin[1]}" origin_sequence = 2 destination_sequence = 1 else: route_name = f"{origin[1]} - {dest_id}" origin_sequence = 1 destination_sequence = 2 # Define the final origin and destination rows for the input Stops origin_row = [route_name, origin_sequence, origin[1], origin[0], None] + list(origin)[3:] destination_row = [route_name, destination_sequence, None, destination[0], destination[1]] + \ list(destination)[2:] icur.insertRow(origin_row) destination_rows.append(destination_row) # Insert destinations with self.rt_solver.insertCursor( arcpy.nax.RouteInputDataType.Stops, ["RouteName", "Sequence", self.origin_unique_id_field_name, "SHAPE@", self.dest_unique_id_field_name] + self.destination_transfer_fields ) as dcur: for row in destination_rows: dcur.insertRow(row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _insert_stops_many_to_many(self):\r\n # Store data of the relevant origins and destinations in dictionaries for quick lookups and reuse\r\n o_data = {} # {Origin ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_o...
[ "0.65625894", "0.5583581", "0.5395718", "0.5393464", "0.5392419", "0.53610194", "0.5345976", "0.52621037", "0.5238447", "0.521201", "0.52106446", "0.52027905", "0.518865", "0.5178128", "0.51780194", "0.51352537", "0.51346874", "0.5124585", "0.51178783", "0.51078916", "0.51078...
0.7464433
0
Insert each predefined OD pair into the Route analysis for the manytomany case.
def _insert_stops_many_to_many(self): # Store data of the relevant origins and destinations in dictionaries for quick lookups and reuse o_data = {} # {Origin ID: [Shape, transferred fields]} for row in arcpy.da.SearchCursor( # pylint: disable=no-member self.input_origins_layer, [self.origin_id_field, "SHAPE@"] + self.origin_transfer_fields ): o_data[row[0]] = row[1:] d_data = {} # {Destination ID: [Shape, transferred fields]} for row in arcpy.da.SearchCursor( # pylint: disable=no-member self.input_destinations_layer, [self.dest_id_field, "SHAPE@"] + self.destination_transfer_fields ): d_data[row[0]] = row[1:] # Insert origins from each OD pair into the Route analysis with self.rt_solver.insertCursor( arcpy.nax.RouteInputDataType.Stops, ["RouteName", "Sequence", self.origin_unique_id_field_name, "SHAPE@"] + self.origin_transfer_fields ) as icur: for od_pair in self.od_pairs: origin_id, dest_id = od_pair try: origin_data = o_data[origin_id] except KeyError: # This should never happen because we should have preprocessed this out. self.logger.debug( f"Origin from OD Pairs not found in inputs. Skipped pair {od_pair}.") continue route_name = f"{origin_id} - {dest_id}" icur.insertRow((route_name, 1, origin_id) + origin_data) # Insert destinations from each OD pair into the Route analysis with self.rt_solver.insertCursor( arcpy.nax.RouteInputDataType.Stops, ["RouteName", "Sequence", self.dest_unique_id_field_name, "SHAPE@"] + self.destination_transfer_fields ) as icur: for od_pair in self.od_pairs: origin_id, dest_id = od_pair try: dest_data = d_data[dest_id] except KeyError: # This should never happen because we should have preprocessed this out. self.logger.debug( f"Destination from OD Pairs not found in inputs. Skipped pair {od_pair}.") continue route_name = f"{origin_id} - {dest_id}" icur.insertRow((route_name, 2, dest_id) + dest_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_parties(self, *parties) -> None:\n\n for party in parties:\n self._route_table['route_table'][party.get_id()] = party.to_entry_point(\n )", "def assign_aovs(self, aovs: List[AOV]):\n\t\tfor aov in aovs:\n\t\t\tself.assign_aov(aov)", "def route(self, ori, dest, pois):\n ...
[ "0.55640215", "0.50584227", "0.50430346", "0.502575", "0.5020248", "0.50056565", "0.49825788", "0.4962253", "0.49563637", "0.4931308", "0.4920993", "0.49143758", "0.4910077", "0.48764232", "0.48703808", "0.48517448", "0.48504838", "0.4847047", "0.48325107", "0.4819616", "0.48...
0.612654
0
Create and solve a Route analysis for the designated preassigned origindestination pairs.
def solve(self, chunk_definition): # pylint: disable=too-many-locals, too-many-statements, too-many-branches # Select the inputs to process if self.pair_type is helpers.PreassignedODPairType.one_to_one: self._select_inputs_one_to_one(chunk_definition) elif self.pair_type is helpers.PreassignedODPairType.many_to_many: self._get_od_pairs_for_chunk(chunk_definition) self._select_inputs_many_to_many() else: raise NotImplementedError(f"Invalid PreassignedODPairType: {self.pair_type}") # Initialize the Route solver object self.initialize_rt_solver() self._add_unique_id_fields() # Insert the origins and destinations self.logger.debug(f"Route solver fields transferred from Origins: {self.origin_transfer_fields}") self.logger.debug(f"Route solver fields transferred from Destinations: {self.destination_transfer_fields}") if self.pair_type is helpers.PreassignedODPairType.one_to_one: self._insert_stops_one_to_one() elif self.pair_type is helpers.PreassignedODPairType.many_to_many: self._insert_stops_many_to_many() else: raise NotImplementedError(f"Invalid PreassignedODPairType: {self.pair_type}") if self.rt_solver.count(arcpy.nax.RouteInputDataType.Stops) == 0: # There were no valid destinations for this set of origins self.logger.debug("No valid destinations for this set of origins. Skipping Route calculation.") return # Load barriers # Note: This loads ALL barrier features for every analysis, even if they are very far away from any of # the inputs in the current chunk. You may want to select only barriers within a reasonable distance of the # inputs, particularly if you run into the maximumFeaturesAffectedByLineBarriers, # maximumFeaturesAffectedByPointBarriers, and maximumFeaturesAffectedByPolygonBarriers tool limits for portal # solves. However, since barriers is likely an unusual case, deal with this only if it becomes a problem. 
for barrier_fc in self.barriers: self.logger.debug(f"Loading barriers feature class {barrier_fc}...") shape_type = arcpy.Describe(barrier_fc).shapeType if shape_type == "Polygon": class_type = arcpy.nax.RouteInputDataType.PolygonBarriers elif shape_type == "Polyline": class_type = arcpy.nax.RouteInputDataType.LineBarriers elif shape_type == "Point": class_type = arcpy.nax.RouteInputDataType.PointBarriers else: self.logger.warning( f"Barrier feature class {barrier_fc} has an invalid shape type and will be ignored." ) continue barriers_field_mappings = self.rt_solver.fieldMappings(class_type, True) self.rt_solver.load(class_type, barrier_fc, barriers_field_mappings, True) # Solve the Route analysis self.logger.debug("Solving Route...") solve_start = time.time() self.solve_result = self.rt_solver.solve() solve_end = time.time() self.logger.debug(f"Solving Route completed in {round(solve_end - solve_start, 3)} seconds.") # Handle solve messages solve_msgs = [msg[-1] for msg in self.solve_result.solverMessages(arcpy.nax.MessageSeverity.All)] for msg in solve_msgs: self.logger.debug(msg) # Update the result dictionary self.job_result["solveMessages"] = solve_msgs if not self.solve_result.solveSucceeded: self.logger.debug("Solve failed.") return self.logger.debug("Solve succeeded.") self.job_result["solveSucceeded"] = True # Save output self._export_to_feature_class(chunk_definition) self.logger.debug("Finished calculating Route.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traveling_salesman(destinations_1):\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicl...
[ "0.67139935", "0.6400536", "0.6300604", "0.6229401", "0.6149414", "0.61029875", "0.60279167", "0.59973127", "0.59507513", "0.59370536", "0.5883438", "0.58514273", "0.58248085", "0.58187276", "0.57969856", "0.5756195", "0.5724864", "0.5687748", "0.5662441", "0.56054395", "0.55...
0.6315577
2
Export the Route result to a feature class.
def _export_to_feature_class(self, chunk_definition): # Make output gdb rt_workspace = self._create_output_gdb() # Export routes output_routes = os.path.join(rt_workspace, f"Routes_{chunk_definition[0]}_{chunk_definition[1]}") self.logger.debug(f"Exporting Route Routes output to {output_routes}...") self.solve_result.export(arcpy.nax.RouteOutputDataType.Routes, output_routes) # Export stops output_stops = os.path.join(rt_workspace, f"Stops_{chunk_definition[0]}_{chunk_definition[1]}") self.logger.debug(f"Exporting Route Stops output to {output_stops}...") self.solve_result.export(arcpy.nax.RouteOutputDataType.Stops, output_stops) # Join the input ID fields to Routes # The new FirstStopID and LastStopID fields were added at Pro 3.1 / Enterprise 11.1 to make relationships # between IDs/OIDs in output classes are more reliable. Use these fields if they exist in the output. # Otherwise, use FirstStopOID and LastStopOID, which are mostly reliable but not perfect. For best results, use # the most recent ArcGIS software. if "FirstStopID" in self.solve_result.fieldNames(arcpy.nax.RouteOutputDataType.Routes): id_field_prefix = "ID" else: id_field_prefix = "OID" if self.reverse_direction: first_stop_field = self.dest_unique_id_field_name second_stop_field = self.origin_unique_id_field_name else: first_stop_field = self.origin_unique_id_field_name second_stop_field = self.dest_unique_id_field_name with arcpy.EnvManager(overwriteOutput=True): helpers.run_gp_tool( self.logger, arcpy.management.JoinField, [output_routes, f"FirstStop{id_field_prefix}", output_stops, "ObjectID", [first_stop_field]] ) helpers.run_gp_tool( self.logger, arcpy.management.JoinField, [output_routes, f"LastStop{id_field_prefix}", output_stops, "ObjectID", [second_stop_field]] ) self.job_result["outputRoutes"] = output_routes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post_process_route_fcs(self):\r\n # Create the final output feature class\r\n desc = arcpy.Describe(self.route_fcs[0])\r\n helpers.run_gp_tool(\r\n LOGGER,\r\n arcpy.management.CreateFeatureclass, [\r\n os.path.dirname(self.out_routes),\r\n ...
[ "0.6221787", "0.5916604", "0.53928643", "0.5349487", "0.5265204", "0.52557456", "0.51746285", "0.51711124", "0.51549613", "0.5122801", "0.51089555", "0.50449544", "0.5029637", "0.50276506", "0.5018805", "0.4998023", "0.49829662", "0.49803904", "0.49756134", "0.49702242", "0.4...
0.72400814
0
Solve a Route analysis for the given inputs for the given chunk of preassigned OD pairs.
def solve_route(inputs, chunk): rt = Route(**inputs) if inputs["pair_type"] is helpers.PreassignedODPairType.one_to_one: rt.logger.info(f"Processing origins OID {chunk[0]} to {chunk[1]} as job id {rt.job_id}") elif inputs["pair_type"] is helpers.PreassignedODPairType.many_to_many: rt.logger.info(f"Processing chunk {chunk[0]} as job id {rt.job_id}") rt.solve(chunk) rt.teardown_logger() return rt.job_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve(self, chunk_definition): # pylint: disable=too-many-locals, too-many-statements, too-many-branches\r\n # Select the inputs to process\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._select_inputs_one_to_one(chunk_definition)\r\n elif self.pair...
[ "0.7265158", "0.6186221", "0.5823401", "0.56546044", "0.5640756", "0.55801356", "0.54897994", "0.5467185", "0.5436492", "0.537317", "0.5360428", "0.5298746", "0.5258818", "0.523264", "0.5204271", "0.51753855", "0.5158027", "0.5145004", "0.5122745", "0.5106338", "0.50887746", ...
0.73194927
0
Compute Routes between origins and their assigned destinations in parallel and combine results. Compute Routes in parallel and combine the results. This class assumes that the inputs have already been preprocessed and validated.
def __init__( # pylint: disable=too-many-locals, too-many-arguments self, pair_type_str, origins, origin_id_field, destinations, dest_id_field, network_data_source, travel_mode, time_units, distance_units, max_routes, max_processes, out_routes, scratch_folder, reverse_direction=False, assigned_dest_field=None, od_pair_table=None, time_of_day=None, barriers=None ): pair_type = helpers.PreassignedODPairType[pair_type_str] self.origins = origins self.destinations = destinations self.out_routes = out_routes self.scratch_folder = scratch_folder time_units = helpers.convert_time_units_str_to_enum(time_units) distance_units = helpers.convert_distance_units_str_to_enum(distance_units) if not barriers: barriers = [] self.max_processes = max_processes if not time_of_day: time_of_day = None else: time_of_day = datetime.datetime.strptime(time_of_day, helpers.DATETIME_FORMAT) # Initialize the dictionary of inputs to send to each OD solve self.rt_inputs = { "pair_type": pair_type, "origins": self.origins, "origin_id_field": origin_id_field, "destinations": self.destinations, "dest_id_field": dest_id_field, "network_data_source": network_data_source, "travel_mode": travel_mode, "time_units": time_units, "distance_units": distance_units, "time_of_day": time_of_day, "reverse_direction": reverse_direction, "scratch_folder": self.scratch_folder, "assigned_dest_field": assigned_dest_field, "od_pair_table": od_pair_table, "barriers": barriers, "origin_transfer_fields": [], # Populate later "destination_transfer_fields": [] # Populate later } # List of intermediate output OD Line files created by each process self.route_fcs = [] # Construct OID ranges for chunks of origins and destinations if pair_type is helpers.PreassignedODPairType.one_to_one: # Chunks are of the format [first origin ID, second origin ID] self.chunks = helpers.get_oid_ranges_for_input(origins, max_routes) elif pair_type is helpers.PreassignedODPairType.many_to_many: # Chunks are of the format [chunk_num, chunk_size] 
num_od_pairs = 0 with open(od_pair_table, "r", encoding="utf-8") as f: for _ in f: num_od_pairs += 1 num_chunks = ceil(num_od_pairs / max_routes) self.chunks = [[i, max_routes] for i in range(num_chunks)] # Calculate the total number of jobs to use in logging self.total_jobs = len(self.chunks) self.optimized_cost_field = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_route_in_parallel(self):\r\n # Validate Route settings. Essentially, create a dummy Route class instance and set up the\r\n # solver object to ensure this at least works. Do this up front before spinning up a bunch of parallel processes\r\n # that are guaranteed to all fail.\r\n ...
[ "0.6572114", "0.64402664", "0.59652627", "0.5922691", "0.5920702", "0.5907399", "0.59004366", "0.58980155", "0.5884197", "0.58173287", "0.57752156", "0.57466954", "0.57233685", "0.5712399", "0.57113975", "0.57011473", "0.56868786", "0.56685567", "0.5667153", "0.5647773", "0.5...
0.48959836
83
Validate Route settings before spinning up a bunch of parallel processes doomed to failure. Also check which field name in the output OD Lines will store the optimized cost values. This depends on the travel mode being used by the analysis, and we capture it here to use in later steps.
def _validate_route_settings(self): # Create a dummy Route object and set properties. This allows us to # detect any errors prior to spinning up a bunch of parallel processes and having them all fail. LOGGER.debug("Validating Route settings...") rt = None try: rt = Route(**self.rt_inputs) rt.initialize_rt_solver() LOGGER.debug("Route settings successfully validated.") except Exception: LOGGER.error("Error initializing Route analysis.") errs = traceback.format_exc().splitlines() for err in errs: LOGGER.error(err) raise finally: if rt: LOGGER.debug("Deleting temporary test Route job folder...") # Close logging rt.teardown_logger() # Delete output folder shutil.rmtree(rt.job_result["jobFolder"], ignore_errors=True) del rt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def f_check_adr_parameters_correctness(dict):\n\n if int(dict[\"operation_mode_num\"]) not in (0, 1, 2, 3, 4, 5, 6):\n print('\\n Error!!! Operation mode is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"FFT_size_samples\"]) not in (2048, 4096, 8192, 16384, 32768):\n pri...
[ "0.58897823", "0.5755311", "0.5660653", "0.557182", "0.5522201", "0.550903", "0.54448694", "0.53637433", "0.5346746", "0.5307992", "0.52819705", "0.52334815", "0.52329284", "0.52285826", "0.5222194", "0.52111304", "0.51922095", "0.5165339", "0.51619154", "0.51523876", "0.5147...
0.7251164
0
Discover if the origins and destinations include valid fields we can use in the Route analysis. Any fields with the correct names and data types matching valid fields recognized by the Route solver for the Stops input can be used in the analysis. Compare the input origins and destinations fields with the list of supported Route Stops fields and populate the list of fields to transfer in the route inputs dictionary.
def _populate_input_data_transfer_fields(self): # Valid fields for the Route Stops input are described here: # https://pro.arcgis.com/en/pro-app/latest/arcpy/network-analyst/route-input-data-types.htm # Do not transfer RouteName or Sequence as these are explicitly controlled by this tool. Do not transfer # LocationType because we want all inputs to be Stops. Waypoints don't make sense for this analysis. int_types = ["Integer", "SmallInteger"] numerical_types = ["Double", "Single"] + int_types rt_stops_input_fields = { "Name": ["String"], "AdditionalTime": numerical_types, "AdditionalDistance": numerical_types, "AdditionalCost": numerical_types, "TimeWindowStart": ["Date"], "TimeWindowEnd": ["Date"], "CurbApproach": int_types, "Bearing": numerical_types, "BearingTol": numerical_types, "NavLatency": numerical_types, "SourceID": int_types, "SourceOID": int_types, "PosAlong": numerical_types, "SideOfEdge": int_types } # Preserve origin and destination input fields that match names and types origin_transfer_fields = [ f.name for f in arcpy.ListFields(self.origins) if f.name in rt_stops_input_fields and f.type in rt_stops_input_fields[f.name]] self.rt_inputs["origin_transfer_fields"] = origin_transfer_fields if origin_transfer_fields: LOGGER.info(( "Supported fields in the input Origins table that will be used in the analysis: " f"{origin_transfer_fields}" )) destination_transfer_fields = [ f.name for f in arcpy.ListFields(self.destinations) if f.name in rt_stops_input_fields and f.type in rt_stops_input_fields[f.name]] self.rt_inputs["destination_transfer_fields"] = destination_transfer_fields if destination_transfer_fields: LOGGER.info(( "Supported fields in the input Destinations table that will be used in the analysis: " f"{destination_transfer_fields}" ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _insert_stops_one_to_one(self): # pylint: disable=too-many-locals\r\n # Use an insertCursor to insert Stops into the Route analysis\r\n destinations = {}\r\n destination_rows = []\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n ...
[ "0.6113399", "0.5643113", "0.54353887", "0.5379565", "0.5359705", "0.53044266", "0.5218753", "0.5165528", "0.51130545", "0.5098658", "0.5056272", "0.5042018", "0.50327575", "0.5018349", "0.50022423", "0.49981675", "0.49830243", "0.49737117", "0.49564588", "0.4953842", "0.4924...
0.69479537
0
Solve the Route in chunks and postprocess the results.
def solve_route_in_parallel(self): # Validate Route settings. Essentially, create a dummy Route class instance and set up the # solver object to ensure this at least works. Do this up front before spinning up a bunch of parallel processes # that are guaranteed to all fail. self._validate_route_settings() # Check if the input origins and destinations have any fields we should use in the route analysis self._populate_input_data_transfer_fields() # Compute Route in parallel LOGGER.info(f"Beginning parallelized Route solves ({self.total_jobs} chunks)") completed_jobs = 0 # Track the number of jobs completed so far to use in logging # Use the concurrent.futures ProcessPoolExecutor to spin up parallel processes that solve the routes with futures.ProcessPoolExecutor(max_workers=self.max_processes) as executor: # Each parallel process calls the solve_route() function with the rt_inputs dictionary for the # given origin ranges and their assigned destinations. jobs = {executor.submit(solve_route, self.rt_inputs, range): range for range in self.chunks} # As each job is completed, add some logging information and store the results to post-process later for future in futures.as_completed(jobs): try: # The Route job returns a results dictionary. Retrieve it. result = future.result() except Exception: # pylint: disable=broad-except # If we couldn't retrieve the result, some terrible error happened and the job errored. # Note: This does not mean solve failed. It means some unexpected error was thrown. The most likely # causes are: # a) If you're calling a service, the service was temporarily down. # b) You had a temporary file read/write or resource issue on your machine. # c) If you're actively updating the code, you introduced an error. # To make the tool more robust against temporary glitches, retry submitting the job up to the number # of times designated in helpers.MAX_RETRIES. If the job is still erroring after that many retries, # fail the entire tool run. 
errs = traceback.format_exc().splitlines() failed_range = jobs[future] LOGGER.debug(( f"Failed to get results for Route chunk {failed_range} from the parallel process. Will retry " f"up to {helpers.MAX_RETRIES} times. Errors: {errs}" )) job_failed = True num_retries = 0 while job_failed and num_retries < helpers.MAX_RETRIES: num_retries += 1 try: future = executor.submit(solve_route, self.rt_inputs, failed_range) result = future.result() job_failed = False LOGGER.debug(f"Route chunk {failed_range} succeeded after {num_retries} retries.") except Exception: # pylint: disable=broad-except # Update exception info to the latest error errs = traceback.format_exc().splitlines() if job_failed: # The job errored and did not succeed after retries. Fail the tool run because something # terrible is happening. LOGGER.debug(f"Route chunk {failed_range} continued to error after {num_retries} retries.") LOGGER.error("Failed to get Route result from parallel processing.") errs = traceback.format_exc().splitlines() for err in errs: LOGGER.error(err) raise # If we got this far, the job completed successfully and we retrieved results. completed_jobs += 1 LOGGER.info( f"Finished Route calculation {completed_jobs} of {self.total_jobs}.") # Parse the results dictionary and store components for post-processing. if result["solveSucceeded"]: self.route_fcs.append(result["outputRoutes"]) else: # Typically, a solve fails because no destinations were found for any of the origins in the chunk, # and this is a perfectly legitimate failure. It is not an error. However, they may be other, less # likely, reasons for solve failure. Write solve messages to the main GP message thread in debug # mode only in case the user is having problems. The user can also check the individual OD log # files. 
LOGGER.debug(f"Solve failed for job id {result['jobId']}.") LOGGER.debug(result["solveMessages"]) # Post-process outputs if self.route_fcs: LOGGER.info("Post-processing Route results...") self.route_fcs = sorted(self.route_fcs) self._post_process_route_fcs() else: LOGGER.warning("All Route solves failed, so no output was produced.") # Clean up # Delete the job folders if the job succeeded if DELETE_INTERMEDIATE_OUTPUTS: LOGGER.info("Deleting intermediate outputs...") try: shutil.rmtree(self.scratch_folder, ignore_errors=True) except Exception: # pylint: disable=broad-except # If deletion doesn't work, just throw a warning and move on. This does not need to kill the tool. LOGGER.warning(f"Unable to delete intermediate Route output folder {self.scratch_folder}.") LOGGER.info("Finished calculating Routes.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_route(inputs, chunk):\r\n rt = Route(**inputs)\r\n if inputs[\"pair_type\"] is helpers.PreassignedODPairType.one_to_one:\r\n rt.logger.info(f\"Processing origins OID {chunk[0]} to {chunk[1]} as job id {rt.job_id}\")\r\n elif inputs[\"pair_type\"] is helpers.PreassignedODPairType.many_to_m...
[ "0.6792837", "0.5855412", "0.5633914", "0.548248", "0.54527795", "0.532048", "0.53184384", "0.52948433", "0.52264017", "0.5213624", "0.51902413", "0.51709", "0.51507914", "0.5139252", "0.51372004", "0.5116195", "0.5072712", "0.507154", "0.50373584", "0.50307095", "0.5008679",...
0.6839286
0
Merge the routes calculated in each separate process into a single feature class. Create an empty final output feature class and populate it using InsertCursor, as this tends to be faster than using the Merge geoprocessing tool.
def _post_process_route_fcs(self): # Create the final output feature class desc = arcpy.Describe(self.route_fcs[0]) helpers.run_gp_tool( LOGGER, arcpy.management.CreateFeatureclass, [ os.path.dirname(self.out_routes), os.path.basename(self.out_routes), "POLYLINE", self.route_fcs[0], # template feature class to transfer full schema "SAME_AS_TEMPLATE", "SAME_AS_TEMPLATE", desc.spatialReference ] ) # Insert the rows from all the individual output feature classes into the final output fields = ["SHAPE@"] + [f.name for f in desc.fields] with arcpy.da.InsertCursor(self.out_routes, fields) as cur: # pylint: disable=no-member for fc in self.route_fcs: for row in arcpy.da.SearchCursor(fc, fields): # pylint: disable=no-member cur.insertRow(row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _export_to_feature_class(self, chunk_definition):\r\n # Make output gdb\r\n rt_workspace = self._create_output_gdb()\r\n\r\n # Export routes\r\n output_routes = os.path.join(rt_workspace, f\"Routes_{chunk_definition[0]}_{chunk_definition[1]}\")\r\n self.logger.debug(f\"Export...
[ "0.60113615", "0.5859844", "0.5772984", "0.5617712", "0.5504301", "0.5316391", "0.5313953", "0.5273943", "0.5268024", "0.5257252", "0.52431345", "0.52355766", "0.52347904", "0.52220434", "0.52174675", "0.5180655", "0.5177188", "0.5174838", "0.51718193", "0.5159236", "0.511246...
0.71977544
0
Read arguments passed in via subprocess and run the parallel Route. This script is intended to be called via subprocess via the solve_large_route_pair_analysis.py module, which does essential preprocessing and validation. Users should not call this script directly from the command line. We must launch this script via subprocess in order to support parallel processing from an ArcGIS Pro script tool, which cannot do parallel processing directly.
def launch_parallel_rt_pairs(): # Create the parser parser = argparse.ArgumentParser(description=globals().get("__doc__", ""), fromfile_prefix_chars='@') # Define Arguments supported by the command line utility # --pair-type parameter help_string = "The type of origin-destination pair assignment to use. Either one_to_one or many_to_many." parser.add_argument("-pt", "--pair-type", action="store", dest="pair_type_str", help=help_string, required=True) # --origins parameter help_string = "The full catalog path to the feature class containing the origins." parser.add_argument("-o", "--origins", action="store", dest="origins", help=help_string, required=True) # --origins-id-field parameter help_string = "The name of the unique ID field in origins." parser.add_argument( "-oif", "--origins-id-field", action="store", dest="origin_id_field", help=help_string, required=True) # --destinations parameter help_string = "The full catalog path to the feature class containing the destinations." parser.add_argument("-d", "--destinations", action="store", dest="destinations", help=help_string, required=True) # --destinations-id-field parameter help_string = "The name of the unique ID field in destinations." parser.add_argument( "-dif", "--destinations-id-field", action="store", dest="dest_id_field", help=help_string, required=True) # --network-data-source parameter help_string = "The full catalog path to the network dataset or a portal url that will be used for the analysis." parser.add_argument( "-n", "--network-data-source", action="store", dest="network_data_source", help=help_string, required=True) # --travel-mode parameter help_string = ( "The name or JSON string representation of the travel mode from the network data source that will be used for " "the analysis." ) parser.add_argument("-tm", "--travel-mode", action="store", dest="travel_mode", help=help_string, required=True) # --time-units parameter help_string = "String name of the time units for the analysis. 
These units will be used in the output." parser.add_argument("-tu", "--time-units", action="store", dest="time_units", help=help_string, required=True) # --distance-units parameter help_string = "String name of the distance units for the analysis. These units will be used in the output." parser.add_argument( "-du", "--distance-units", action="store", dest="distance_units", help=help_string, required=True) # --max-routes parameter help_string = "Maximum number of routes that can be in one chunk for parallel processing of Route solves." parser.add_argument( "-mr", "--max-routes", action="store", dest="max_routes", type=int, help=help_string, required=True) # --max-processes parameter help_string = "Maximum number parallel processes to use for the Route solves." parser.add_argument( "-mp", "--max-processes", action="store", dest="max_processes", type=int, help=help_string, required=True) # --reverse-direction parameter help_string = "Whether to reverse the direction of travel (destination to origin)." parser.add_argument( "-rd", "--reverse-direction", action="store", type=lambda x: bool(strtobool(x)), dest="reverse_direction", help=help_string, required=True) # --out-routes parameter help_string = "The full catalog path to the output routes feature class." parser.add_argument("-r", "--out-routes", action="store", dest="out_routes", help=help_string, required=True) # --scratch-folder parameter help_string = "The full catalog path to the scratch folder where intermediate outputs will be stored." parser.add_argument( "-sf", "--scratch-folder", action="store", dest="scratch_folder", help=help_string, required=True) # --assigned-dest-field parameter help_string = ("The name of the field in origins indicating the assigned destination. 
" "Required for one_to_one pair-type") parser.add_argument( "-adf", "--assigned-dest-field", action="store", dest="assigned_dest_field", help=help_string, required=False) # --od-pair-table parameter help_string = "CSV file holding preassigned OD pairs. Required for many_to_many pair-type." parser.add_argument( "-odp", "--od-pair-table", action="store", dest="od_pair_table", help=help_string, required=False) # --time-of-day parameter help_string = (f"The time of day for the analysis. Must be in {helpers.DATETIME_FORMAT} format. Set to None for " "time neutral.") parser.add_argument("-tod", "--time-of-day", action="store", dest="time_of_day", help=help_string, required=False) # --barriers parameter help_string = "A list of catalog paths to the feature classes containing barriers to use in the Route." parser.add_argument( "-b", "--barriers", action="store", dest="barriers", help=help_string, nargs='*', required=False) try: # Get arguments as dictionary. args = vars(parser.parse_args()) # Initialize a parallel Route calculator class rt_calculator = ParallelRoutePairCalculator(**args) # Solve the Route in parallel chunks start_time = time.time() rt_calculator.solve_route_in_parallel() LOGGER.info(f"Parallel Route calculation completed in {round((time.time() - start_time) / 60, 2)} minutes") except Exception: # pylint: disable=broad-except LOGGER.error("Error in parallelization subprocess.") errs = traceback.format_exc().splitlines() for err in errs: LOGGER.error(err) raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_route_in_parallel(self):\r\n # Validate Route settings. Essentially, create a dummy Route class instance and set up the\r\n # solver object to ensure this at least works. Do this up front before spinning up a bunch of parallel processes\r\n # that are guaranteed to all fail.\r\n ...
[ "0.64769953", "0.62038994", "0.57405895", "0.554428", "0.55252844", "0.551435", "0.55140823", "0.55098575", "0.54549307", "0.5444846", "0.54388684", "0.54316586", "0.5404963", "0.53763914", "0.5359437", "0.5330171", "0.5305152", "0.53005004", "0.53004366", "0.5288501", "0.527...
0.68828976
0
Expand various colors to RRGGBB.
def expand_color(color, default=None, passthrough=False, block=None): if color: if color[0] == "#": color = color[1:] try: int(color, 16) except ValueError: return block length = len(color) if length in [3, 4]: color = "".join(color[x] * 2 for x in range(length)) elif length not in [6, 8]: return block return "#" + color.upper() elif block: return block return COLOR_NAMES.get(color, color if passthrough else default)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def combine_color(red, green, blue):\r\n\r\n return (red << 16) + (green << 8) + blue", "def rgb(r, g, b)...
[ "0.64377636", "0.64377636", "0.64377636", "0.6364302", "0.6349941", "0.63338196", "0.6211546", "0.6143853", "0.6105021", "0.61023855", "0.61022055", "0.604399", "0.60202074", "0.59963423", "0.5995744", "0.5948604", "0.5905812", "0.59024096", "0.5871113", "0.5865381", "0.58587...
0.5423834
67
Get the tokenized format_string. Tokenizing is resource intensive so we only do it once and cache it
def tokens(self, format_string): if format_string not in self.format_string_cache: tokens = list(re.finditer(self.reg_ex, format_string)) self.format_string_cache[format_string] = tokens return self.format_string_cache[format_string]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_format(self, token):\n if token in self._formats:\n return self._formats[token]\n\n if self._style is None:\n result = self._get_format_from_document(token, self._document)\n else:\n result = self._get_format_from_style(token, self._style)\n\n s...
[ "0.7031436", "0.5833799", "0.5587852", "0.55370355", "0.55229795", "0.5514945", "0.5489987", "0.5487361", "0.5463768", "0.5454155", "0.54331946", "0.53677803", "0.5347152", "0.5261837", "0.5218769", "0.52003187", "0.5194947", "0.51801944", "0.5157241", "0.5129288", "0.5112594...
0.7236307
0
Parses the format_string and returns a set of color names.
def get_color_names(self, format_string): names = set() # Tokenize the format string and process them for token in self.tokens(format_string): if token.group("command"): name = dict(parse_qsl(token.group("command"))).get("color") if ( not name or name in COLOR_NAMES_EXCLUDED or name in COLOR_NAMES or name[0] == "#" ): continue names.add(name) return names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reformatColor(self, colorStr):\n if type(colorStr) is str:\n if colorStr.startswith('#'):\n colorStr = colorStr.replace('#', '')\n else:\n raise Exception('color is not hex format')\n r = int(colorStr[:2], 16)\n g = int(colorStr[2...
[ "0.61174923", "0.60446614", "0.60343224", "0.6032042", "0.5956374", "0.5867158", "0.5859119", "0.5769622", "0.57691973", "0.57649046", "0.5756081", "0.5748874", "0.5739813", "0.5683501", "0.5625979", "0.5616915", "0.5559172", "0.55575323", "0.5540814", "0.5540103", "0.5533907...
0.8132206
0
Parses the format_string and returns a set of placeholders.
def get_placeholders(self, format_string): placeholders = set() # Tokenize the format string and process them for token in self.tokens(format_string): if token.group("placeholder"): placeholders.add(token.group("key")) elif token.group("command"): # get any placeholders used in commands commands = dict(parse_qsl(token.group("command"))) # placeholders only used in `if` if_ = commands.get("if") if if_: placeholders.add(Condition(if_).variable) return placeholders
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_placeholder_formats_list(self, format_string):\n placeholders = []\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"placeholder\"):\n placeholders.append((token.group(\"key\"), token.group(\"format\"...
[ "0.7910855", "0.694886", "0.64524835", "0.6108374", "0.60248744", "0.601687", "0.60016334", "0.5864088", "0.57037616", "0.55861354", "0.55645305", "0.554253", "0.5510738", "0.55103034", "0.5497989", "0.5479562", "0.5358258", "0.5325262", "0.5298705", "0.5236709", "0.5222342",...
0.7691594
1
Parses the format_string and returns a list of tuples (placeholder, format).
def get_placeholder_formats_list(self, format_string): placeholders = [] # Tokenize the format string and process them for token in self.tokens(format_string): if token.group("placeholder"): placeholders.append((token.group("key"), token.group("format"))) return placeholders
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_placeholder_formats(self, format_string, placeholder_formats):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if (\n token.group(\"placeholder\")\n and (not token.group(\"format\"))\...
[ "0.65669346", "0.62842053", "0.6130616", "0.61288065", "0.60436875", "0.5937322", "0.5901326", "0.58831894", "0.5852524", "0.57953036", "0.5765644", "0.57541496", "0.57023305", "0.56520545", "0.56048083", "0.5603498", "0.5459637", "0.5429871", "0.5376852", "0.53540975", "0.53...
0.784211
0
Update a format string renaming placeholders.
def update_placeholders(self, format_string, placeholders): # Tokenize the format string and process them output = [] for token in self.tokens(format_string): if token.group("key") in placeholders: output.append( "{{{}{}}}".format(placeholders[token.group("key")], token.group("format")) ) continue elif token.group("command"): # update any placeholders used in commands commands = parse_qsl(token.group("command"), keep_blank_values=True) # placeholders only used in `if` if "if" in [x[0] for x in commands]: items = [] for key, value in commands: if key == "if": # we have to rebuild from the parts we have condition = Condition(value) variable = condition.variable if variable in placeholders: variable = placeholders[variable] # negation via `!` not_ = "!" if not condition.default else "" condition_ = condition.condition or "" # if there is no condition then there is no # value if condition_: value_ = condition.value else: value_ = "" value = "{}{}{}{}".format(not_, variable, condition_, value_) if value: items.append(f"{key}={value}") else: items.append(key) # we cannot use urlencode because it will escape things # like `!` output.append(r"\?{} ".format("&".join(items))) continue value = token.group(0) output.append(value) return "".join(output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_placeholder_formats(self, format_string, placeholder_formats):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if (\n token.group(\"placeholder\")\n and (not token.group(\"format\"))\...
[ "0.7112868", "0.6138306", "0.60262024", "0.60064745", "0.5947875", "0.5682744", "0.5643566", "0.5574067", "0.5571616", "0.5536187", "0.5533459", "0.545436", "0.5446608", "0.5419904", "0.539936", "0.53418016", "0.5337642", "0.5330448", "0.53120375", "0.5303506", "0.5267097", ...
0.6070207
2
Update a format string adding formats if they are not already present.
def update_placeholder_formats(self, format_string, placeholder_formats): # Tokenize the format string and process them output = [] for token in self.tokens(format_string): if ( token.group("placeholder") and (not token.group("format")) and token.group("key") in placeholder_formats ): output.append(f"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}") continue value = token.group(0) output.append(value) return "".join(output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AddFormat(self, format):\n self._legacy = False\n if format:\n self._format = format", "def addIfMissing(self, format):\n self.setdefault(format.name, format)", "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_v...
[ "0.64630926", "0.62091905", "0.5800091", "0.5796576", "0.5712765", "0.57114947", "0.57108057", "0.55900645", "0.5558063", "0.55575746", "0.543823", "0.5434389", "0.5409675", "0.54084057", "0.5407691", "0.53960705", "0.5389524", "0.5365024", "0.536046", "0.53591377", "0.534718...
0.6789656
0
Parse the format string into blocks containing Literals, Placeholders etc that we can cache and reuse.
def build_block(self, format_string): first_block = Block(None, py3_wrapper=self.py3_wrapper) block = first_block # Tokenize the format string and process them for token in self.tokens(format_string): value = token.group(0) if token.group("block_start"): # Create new block block = block.new_block() elif token.group("block_end"): # Close block setting any valid state as needed # and return to parent block to continue if not block.parent: raise Exception("Too many `]`") block = block.parent elif token.group("switch"): # a new option has been created block = block.switch() elif token.group("placeholder"): # Found a {placeholder} key = token.group("key") format = token.group("format") block.add(Placeholder(key, format)) elif token.group("literal"): block.add(Literal(value)) elif token.group("lost_brace"): # due to how parsing happens we can get a lonesome } # eg in format_string '{{something}' this fixes that issue block.add(Literal(value)) elif token.group("command"): # a block command has been found block.set_commands(token.group("command")) elif token.group("escaped"): # escaped characters add unescaped values if value[0] in ["\\", "{", "}"]: value = value[1:] block.add(Literal(value)) if block.parent: raise Exception("Block not closed") # add to the cache self.block_cache[format_string] = first_block
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse(self, fmtstr):\n def _match_brace(string, start_pos, pair='[]'):\n \"\"\"Pairing brackets (used internally in _parse method)\"\"\"\n depth = 1\n if string[start_pos] != pair[0]:\n return None\n for index, char in enumerate(string[start_po...
[ "0.64509463", "0.61767745", "0.6141088", "0.6093478", "0.59834534", "0.59391457", "0.5813534", "0.5672864", "0.565769", "0.5619488", "0.5604256", "0.5531012", "0.5529596", "0.55168927", "0.54857767", "0.545964", "0.5454546", "0.5443225", "0.54375136", "0.54195976", "0.5334066...
0.73439676
0
Format a string, substituting place holders which can be found in param_dict, attributes of the supplied module, or provided via calls to the attr_getter function.
def format(
    self,
    format_string,
    module=None,
    param_dict=None,
    force_composite=False,
    attr_getter=None,
):
    """
    Format a string, substituting placeholders which can be found in
    ``param_dict``, attributes of the supplied ``module``, or provided
    via calls to the ``attr_getter`` function.

    Returns a Composite, or a plain string when the rendered output is
    empty (unless ``force_composite`` is set, which forces an empty
    Composite instead of ``""``).
    """
    if param_dict is None:
        param_dict = {}

    # if the processed format string is not in the cache then create it.
    if format_string not in self.block_cache:
        self.build_block(format_string)
    first_block = self.block_cache[format_string]

    def get_parameter(key):
        """
        function that finds and returns the value for a placeholder.
        """
        if key in param_dict:
            # was a supplied parameter
            param = param_dict.get(key)
        elif module and hasattr(module, key):
            param = getattr(module, key)
            if hasattr(param, "__call__"):
                # we don't allow module methods
                raise Exception()
        elif attr_getter:
            # get value from attr_getter function
            try:
                param = attr_getter(key)
            except:  # noqa e722
                raise Exception()
        else:
            # unknown placeholder: the renderer treats the raised
            # exception as "no value available"
            raise Exception()
        if isinstance(param, Composite):
            # copy non-empty composites so callers cannot mutate them
            if param.text():
                param = param.copy()
            else:
                param = ""
        return param

    # render our processed format
    valid, output = first_block.render(get_parameter, module)

    # clean things up a little
    if isinstance(output, list):
        output = Composite(output)

    if not output:
        if force_composite:
            output = Composite()
        else:
            output = ""

    return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_param(string, param, value, param_format=None):\n\n if param_format == \"json\":\n return sub(r\"(?P<json_replacement>\\\"%s\\\"\\s*:\\s*)\\\"\\s*\\\"\" %\n escape(str(param)), \"\\\\1\\\"%s\\\"\" % value, string)\n elif param_format == \"h...
[ "0.5990877", "0.59723574", "0.57197136", "0.5632017", "0.56243557", "0.5579166", "0.54825616", "0.5474345", "0.5385853", "0.5365595", "0.5335425", "0.5315145", "0.5266371", "0.5259829", "0.52348405", "0.52140254", "0.5195806", "0.5177217", "0.5099359", "0.5097019", "0.5095286...
0.6764331
0
function that finds and returns the value for a placeholder.
def get_parameter(key):
    """
    function that finds and returns the value for a placeholder.

    Lookup order: ``param_dict`` -> attribute of ``module`` -> the
    ``attr_getter`` callback.  Raises when the key cannot be resolved;
    the caller treats that as "placeholder has no value".

    NOTE(review): ``param_dict``, ``module``, ``attr_getter`` and
    ``Composite`` are free variables bound in the enclosing scope.
    """
    if key in param_dict:
        # was a supplied parameter
        param = param_dict.get(key)
    elif module and hasattr(module, key):
        param = getattr(module, key)
        if hasattr(param, "__call__"):
            # we don't allow module methods
            raise Exception()
    elif attr_getter:
        # get value from attr_getter function
        try:
            param = attr_getter(key)
        except:  # noqa e722
            raise Exception()
    else:
        raise Exception()
    if isinstance(param, Composite):
        # non-empty composites are copied; empty ones collapse to ""
        if param.text():
            param = param.copy()
        else:
            param = ""
    return param
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __findPlaceholder(self, data, index):\r\n m = self.__placeholder_re.search(data, index)\r\n if m:\r\n return m.group(1), m.end()\r\n else:\r\n return None, index + 1", "def placeholder(self):\n return self._placeholder", "def placeholder(self) -> str | None...
[ "0.72384614", "0.65259403", "0.6496153", "0.64682907", "0.60817194", "0.58944154", "0.5852093", "0.5762202", "0.57511127", "0.56862986", "0.56270653", "0.55724573", "0.5522929", "0.5522929", "0.5460267", "0.5454473", "0.5445658", "0.54357004", "0.54279006", "0.54072934", "0.5...
0.0
-1
return the correct value for the placeholder
def get(self, get_params, block):
    """
    return the correct value for the placeholder

    Returns ``(valid, value, enough)``: ``valid`` says whether the
    placeholder produced a usable value, ``value`` is the (possibly
    formatted) output, and ``enough`` is True when the parameter was
    missing entirely (so the block still counts as having content).
    """
    # fallback shown if the placeholder cannot be resolved at all
    value = f"{{{self.key}}}"
    try:
        value = value_ = get_params(self.key)
        if self.format.startswith(":"):
            # if a parameter has been set to be formatted as a numeric
            # type then we see if we can coerce it to be. This allows
            # the user to format types that normally would not be
            # allowed eg '123' it also allows {:d} to be used as a
            # shorthand for {:.0f}. Use {:g} to remove insignificant
            # trailing zeroes and the decimal point too if there are
            # no remaining digits following it. If the parameter cannot
            # be successfully converted then the format will be removed.
            try:
                if "escape" in self.format:
                    value = escape(value)
                if "ceil" in self.format:
                    value = ceil(float(value))
                if "f" in self.format:
                    value = float(value)
                if "g" in self.format:
                    value = float(value)
                if "d" in self.format:
                    value = int(float(value))
                output = f"{{[{self.key}]{self.format}}}"
                value = output.format({self.key: value})
                value_ = float(value)
            except ValueError:
                pass
        elif self.format.startswith("!"):
            # conversion formats such as {placeholder!r}
            output = f"{{{self.key}{self.format}}}"
            value = value_ = output.format(**{self.key: value})
        if block.commands.not_zero:
            valid = value_ not in ["", None, False, "0", "0.0", 0, 0.0]
        else:
            # '', None, and False are ignored
            # numbers like 0 and 0.0 are not.
            valid = not (value_ in ["", None] or value_ is False)
        enough = False
    except:  # noqa e722
        # Exception raised when we don't have the param
        enough = True
        valid = False
    return valid, value, enough
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def placeholder(self):\n return self._placeholder", "def placeholder(self) -> str | None:\n return self._underlying.placeholder", "def get_val(self):\n return", "def get_geom_placeholder(self, value, srid):\r\n if hasattr(value, 'expression'):\r\n placeholder = '%s.%s' ...
[ "0.7000699", "0.67986435", "0.6459541", "0.6312645", "0.61937106", "0.6171836", "0.615392", "0.6140465", "0.61130303", "0.60600764", "0.59495264", "0.5948369", "0.5947296", "0.5940576", "0.59120387", "0.59089774", "0.5895668", "0.58923846", "0.58919656", "0.58830136", "0.5882...
0.0
-1
Check if the condition has been met. We need to make sure that we are of the correct type.
def _check_valid_condition(self, get_params):
    """
    Evaluate this condition against the current placeholder value.

    The raw string ``self.value`` is coerced to the type of the
    looked-up variable before the comparison.  A missing variable, or
    a numeric comparison value that cannot be parsed, behaves as a
    failed condition.
    """
    try:
        current = get_params(self.variable)
    except:  # noqa e722
        current = None

    # missing variable: the condition can never hold
    if current is None:
        return not self.default

    # coerce the comparison value to match the variable's type
    wanted = self.value
    if isinstance(current, bool):
        wanted = bool(self.value)
    elif isinstance(current, Number):
        try:
            wanted = int(self.value)
        except:  # noqa e722
            try:
                wanted = float(self.value)
            except:  # noqa e722
                # could not parse
                return not self.default

    # compare and return the result
    if self.condition == "=":
        return (current == wanted) == self.default
    if self.condition == ">":
        return (current > wanted) == self.default
    if self.condition == "<":
        return (current < wanted) == self.default
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def condition(self):\n return True", "def check_condition(self):\n\n\t\traw_context = {\n\t\t\t'folk': self.folk\n\t\t}\n\n\t\tstatus, param = self.execute(self.mission_grid, 'condition', self.pending_mission.kingdom, raw_context)\n\t\treturn status", "def check_condition(self, comment):\n if com...
[ "0.67982715", "0.6380257", "0.6109528", "0.60492665", "0.60492665", "0.5985159", "0.5962633", "0.5921427", "0.5918396", "0.5918396", "0.5901136", "0.5835458", "0.5809594", "0.5807828", "0.5793611", "0.57830834", "0.5774989", "0.5774989", "0.5774989", "0.5774989", "0.57692593"...
0.5867462
11
Simple check that the variable is set
def _check_valid_basic(self, get_params):
    """Truthiness test: the condition holds when the variable is set."""
    is_set = False
    try:
        is_set = bool(get_params(self.variable))
    except:  # noqa e722
        # unresolvable variable counts as unset
        pass
    return self.default if is_set else not self.default
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_value(var) :\n return var != None", "def isSet(self) -> bool:\n ...", "def has_assignment_for(self, var):\n return self.variable_to_value.get(var) != None", "def _var_check(self):\n missing = set()\n for v in self.variables:\n if getattr(self, v) is None:\n ...
[ "0.718136", "0.6951559", "0.6602433", "0.65596896", "0.6471603", "0.6464828", "0.64093935", "0.63088953", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0...
0.674686
2
update with commands from the block
def update_commands(self, commands_str):
    """Merge commands parsed from a query-style command string."""
    parsed = dict(parse_qsl(commands_str, keep_blank_values=True))

    condition = parsed.get("if", self._if)
    if condition:
        self._if = Condition(condition)

    self._set_int(parsed, "max_length")
    self._set_int(parsed, "min_length")

    self.color = expand_color(parsed.get("color"), passthrough=True, block=self.color)

    # boolean flags are sticky: once set they stay set
    self.not_zero = "not_zero" in parsed or self.not_zero
    self.show = "show" in parsed or self.show
    self.soft = "soft" in parsed or self.soft
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update():", "def update():", "def commandUpdate(self):\n pass", "def update( ):\r\n pass", "def update(self) -> None:\n ...", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def u...
[ "0.70040303", "0.70040303", "0.68496", "0.68419474", "0.63492554", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6337077", ...
0.0
-1
set integer value from commands
def _set_int(self, commands, name):
    """Copy ``commands[name]`` onto ``self`` as an int; ignore bad values."""
    if name not in commands:
        return
    try:
        setattr(self, name, int(commands[name]))
    except ValueError:
        # non-numeric value: leave the attribute untouched
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setInteger(self, value):", "def setInteger(self, value):", "def set_num(self, num):\n self.cmd_num = num", "def setInteger(self, value: int):\n self.value = value", "def setInt(self, addr: ghidra.program.model.address.Address, value: int) -> None:\n ...", "def getint(self, strcom...
[ "0.71163434", "0.71163434", "0.67835045", "0.62937844", "0.61526585", "0.6076695", "0.6070734", "0.6059233", "0.59684515", "0.5945891", "0.5915541", "0.58779633", "0.5800634", "0.5786643", "0.5786482", "0.5768864", "0.57324964", "0.5721998", "0.56996304", "0.5692422", "0.5685...
0.8104841
0
set any commands for this block
def set_commands(self, command_str):
    """
    set any commands for this block

    Delegates to the block's BlockConfig-style ``commands`` object.
    """
    self.commands.update_commands(command_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commands(self, commands):\n\n self._commands = commands", "def commands():", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def at_cmdset_creation(self):\n super().at_cmdset_creation()\n #\n # any comm...
[ "0.7058163", "0.70184225", "0.6852532", "0.6852532", "0.6852532", "0.6852532", "0.6804543", "0.6804543", "0.66652715", "0.65947163", "0.65928555", "0.65210867", "0.64140224", "0.63312364", "0.6290925", "0.62714374", "0.6251512", "0.6167499", "0.61642545", "0.6146838", "0.6128...
0.65425885
11
create a new sub block to the current block and return it. the sub block is added to the current block.
def new_block(self):
    """Create a child block, attach it to this block and return it."""
    sub_block = Block(self, py3_wrapper=self.py3_wrapper)
    self.add(sub_block)
    return sub_block
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_new_block(self):\n block = BasicBlock()\n self.blocks.append(block)\n return block", "def newblock(self, parent=None):\n block = ControlBlock()\n self.blocks.add(block)\n if parent:\n parent.add_child(block)\n return block", "def newblock(self...
[ "0.7300657", "0.7137283", "0.700307", "0.67426383", "0.65247345", "0.64328855", "0.64139366", "0.6404519", "0.6204402", "0.61623967", "0.6048261", "0.5963573", "0.59493285", "0.5928946", "0.5925224", "0.5924873", "0.59158665", "0.5912983", "0.5904347", "0.58974856", "0.586256...
0.73637503
0
block has been split via | so we need to start a new block for that option and return it to the user.
def switch(self):
    """
    Start the next ``|`` alternative for this block and return it.

    All alternatives share the same base block (the first one created).
    """
    root = self.base_block if self.base_block else self
    self.next_block = Block(self.parent, base_block=root, py3_wrapper=self.py3_wrapper)
    return self.next_block
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def block_one(self):", "def block(self):\n pass", "def handle_request(self, request):\n ret = True\n for option in request.options:\n if option.number == defines.inv_options[\"Block2\"]:\n host, port = request.source\n key = hash(str(host) + str(por...
[ "0.62640697", "0.61661994", "0.6021515", "0.5755425", "0.56972605", "0.5621218", "0.55789465", "0.55772924", "0.5509449", "0.55021685", "0.5426134", "0.53966236", "0.53940547", "0.53849393", "0.5371453", "0.53622526", "0.5353725", "0.5346401", "0.53440475", "0.5342697", "0.53...
0.5264465
24
see if the if condition for a block is valid
def check_valid(self, get_params):
    """Evaluate this block's `if` condition; None when there is none."""
    condition = self.commands._if
    if condition:
        return condition.check_valid(get_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_block(self, block):\n pass", "def is_block(self):\n return self.v & 1 == 0", "def test_29_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return 1; else b:=0; en...
[ "0.78990954", "0.6600305", "0.63182026", "0.63088113", "0.63049823", "0.6304188", "0.6274338", "0.62668514", "0.6249396", "0.62368685", "0.62282825", "0.6225797", "0.62173724", "0.6185306", "0.6149648", "0.61086375", "0.6089408", "0.60828173", "0.6078198", "0.6062693", "0.604...
0.0
-1
render the block and return the output.
def render(self, get_params, module, _if=None):
    """
    render the block and return the output.

    Returns ``(valid, out)`` where ``out`` is a list of i3bar-style
    ``{"full_text": ..., "color": ...}`` dicts.  ``_if`` is passed by a
    parent when re-rendering a soft block with a known condition result.

    NOTE(review): formatting reconstructed from a whitespace-collapsed
    source; statement nesting inferred — confirm against upstream.
    """
    enough = False
    output = []
    valid = None
    if self.commands.show:
        valid = True
    # soft blocks defer rendering until a sibling forces them
    if self.parent and self.commands.soft and _if is None:
        return None, self
    if _if:
        valid = True
    elif self.commands._if:
        valid = self.check_valid(get_params)
    if valid is not False:
        for item in self.content:
            if isinstance(item, Placeholder):
                sub_valid, sub_output, enough = item.get(get_params, self)
                output.append(sub_output)
            elif isinstance(item, Literal):
                sub_valid = None
                enough = True
                output.append(item.text)
            elif isinstance(item, Block):
                sub_valid, sub_output = item.render(get_params, module)
                if sub_valid is None:
                    # soft block: keep it whole for the merge pass below
                    output.append(sub_output)
                else:
                    output.extend(sub_output)
            valid = valid or sub_valid
    if not valid:
        # fall through to the next `|` alternative if there is one
        if self.next_block:
            valid, output = self.next_block.render(get_params, module, _if=self.commands._if)
        elif self.parent is None and ((not self.next_block and enough) or self.base_block):
            valid = True
        else:
            output = []
    # clean
    color = self.commands.color
    if color and color[0] != "#":
        color_name = f"color_{color}"
        threshold_color_name = f"color_threshold_{color}"
        # substitute color
        color = (
            getattr(module, color_name, None)
            or getattr(module, threshold_color_name, None)
            or getattr(module.py3, color_name.upper(), None)
        )
        if color == "hidden":
            return False, []
    text = ""
    out = []
    if isinstance(output, str):
        output = [output]
    # merge as much output as we can.
    first = True
    last_block = None
    for index, item in enumerate(output):
        is_block = isinstance(item, Block)
        if not is_block and item:
            last_block = None
        if isinstance(item, (str, bool, int, float, bytes)) or item is None:
            # plain values accumulate into the pending text run
            text += str(item)
            continue
        elif text:
            # flush the pending text run before a composite/block item
            if not first and (text == "" or out and out[-1].get("color") == color):
                out[-1]["full_text"] += text
            else:
                part = {"full_text": text}
                if color:
                    part["color"] = color
                out.append(part)
            text = ""
        if isinstance(item, Composite):
            if color:
                item.composite_update(item, {"color": color}, soft=True)
            out.extend(item.get_content())
        elif is_block:
            # if this is a block then likely it is soft.
            if not out:
                continue
            for other in output[index + 1:]:
                if other and not isinstance(other, Block):
                    valid, _output = item.render(get_params, module, _if=True)
                    if _output and _output != last_block:
                        last_block = _output
                        out.extend(_output)
                    break
        else:
            if item:
                out.append(item)
        first = False
    # add any left over text
    if text:
        part = {"full_text": text}
        if color:
            part["color"] = color
        out.append(part)
    # process any min/max length commands
    max_length = self.commands.max_length
    min_length = self.commands.min_length
    if max_length or min_length:
        for item in out:
            if max_length is not None:
                item["full_text"] = item["full_text"][:max_length]
                max_length -= len(item["full_text"])
            if min_length:
                min_length -= len(item["full_text"])
        if min_length > 0:
            # left-pad the first part up to the requested width
            out[0]["full_text"] = " " * min_length + out[0]["full_text"]
            min_length = 0
    return valid, out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_block(data):\n\tsnippet = data[2] \n\ttitle = data[0]['name']\n\tdescription = data[0]['description']\n\tblock_type = data[0]['type']\n\t\n\n\t# change the panel outline for\n\t# warnings and detections\n\tblock_border = 'yellow' if block_type == 'warning' else 'red1'\n\n\tcode_snippet = Syntax(\n\t\t\t...
[ "0.7029032", "0.69647723", "0.6806205", "0.65881765", "0.65881765", "0.65881765", "0.65881765", "0.65881765", "0.65881765", "0.65558636", "0.6526438", "0.6469616", "0.6436933", "0.64222467", "0.63671017", "0.63276225", "0.6301424", "0.6263438", "0.6262892", "0.6222637", "0.61...
0.56899357
76
Create a new position.
def __init__(self, x, y):
    """Store the coordinates of this position."""
    self.x, self.y = x, y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_position(self):\n raise NotImplementedError", "def create(self, pos):\n self.pos = pos", "def position(self, create, position=2, **kwargs): # pylint: disable=unused-argument, method-hidden\r\n self.position = position", "def position(self, create, position=2, **kwargs): # pylint...
[ "0.847801", "0.8391693", "0.8154143", "0.8154143", "0.7296945", "0.692898", "0.678412", "0.6775694", "0.67575675", "0.6718587", "0.6701663", "0.65915143", "0.6572498", "0.65635514", "0.65402424", "0.65402424", "0.65402424", "0.65402424", "0.65402424", "0.647187", "0.64189696"...
0.0
-1
Display position in user friendly manner.
def __repr__(self):
    """Human readable form, e.g. ``(3,4)``."""
    return f"({self.x!s},{self.y!s})"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def position(self):\r\n pass", "def display(self):\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", ...
[ "0.7464939", "0.7350452", "0.7230497", "0.71928203", "0.7168089", "0.71171767", "0.6995238", "0.6969509", "0.6844631", "0.68162245", "0.6798185", "0.6786135", "0.665452", "0.6650976", "0.66377974", "0.66324097", "0.6619713", "0.66189533", "0.6613451", "0.6598675", "0.65528977...
0.0
-1
Returns a tuple of position in (x,y) form.
def get(self):
    """Return this position as an ``(x, y)`` tuple."""
    return self.x, self.y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_position(self) -> typing.Tuple[int, int]:\n raise NotImplementedError", "def get_pos(self):\n return (self.x, self.y)", "def position(self) -> Tuple[int, int]:\n return self.row, self.col", "def get_position(self):\n return (self.x_pos, self.y_pos)", "def get_pos(self) -...
[ "0.8117628", "0.80366904", "0.80062807", "0.7994267", "0.79300076", "0.7879146", "0.78181684", "0.7731477", "0.7600716", "0.756264", "0.7516764", "0.75011235", "0.7460002", "0.7377023", "0.7375958", "0.73217714", "0.7272441", "0.72643983", "0.72562724", "0.7233895", "0.723070...
0.7212456
21
Displays a string in user friendly manner.
def __repr__(self):
    """Human readable form: ``(<pos>,<color>)``."""
    return f"({self.pos!s},{self.color!s})"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def see(s):\n print(\"---- %s -----\" % s)", "def _message_display(string):\n print(\"========================================\")\n print(string)\n print(time.asctime(time.localtime(time.time())))\n print(\"========================================\")", "def show_on_screen(self, string, location,...
[ "0.71144617", "0.6865019", "0.6777228", "0.6654619", "0.6601663", "0.6576626", "0.65690356", "0.654347", "0.6448489", "0.6334183", "0.63277024", "0.6308555", "0.6306732", "0.6305804", "0.62739414", "0.62355924", "0.6233103", "0.62326515", "0.62177855", "0.62035185", "0.619715...
0.0
-1
Compares color to pawn color, returns true if they are the same.
def isColor(self, color):
    """Return True when this pawn's color equals *color*."""
    matches = self.color == color
    return matches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def same_color(self, other: 'Piece') -> bool:\n\n return self.color == other.color", "def color_check_mate(self, mycolor):\n\n if not self.color_in_check(mycolor):\n return False\n\n incheck = True\n for (x, y) in self.__players[mycolor]:\n moves = self._get_piec...
[ "0.7082352", "0.6642375", "0.6420325", "0.63803416", "0.6271055", "0.6236686", "0.62152225", "0.6214757", "0.6146159", "0.6136335", "0.6056689", "0.6049656", "0.6021747", "0.60207504", "0.6019101", "0.60071254", "0.6001189", "0.6000824", "0.59897274", "0.59405607", "0.5914164...
0.6147662
8
Sets the position of the pawn
def move(self, x, y):
    """Relocate the pawn to the absolute board position (x, y)."""
    position = self.pos
    position.x = x
    position.y = y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def setPosition(position):", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n s...
[ "0.7676149", "0.7314917", "0.73105", "0.7299208", "0.7293732", "0.722337", "0.72098196", "0.704315", "0.70363337", "0.6967169", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0....
0.638652
55
Create a new object
def __init__(self, state, player=WHITE):
    """
    Create a board: a fresh starting position when ``state`` is None,
    otherwise wrap the supplied state dict (keyed by (x, y) tuples).
    """
    if state is None:
        # empty board, then fill in each side's starting row
        self.gameState = {
            (x, y): EMPTY for x in range(WIDTH) for y in range(HEIGHT)
        }
        for x in range(WIDTH):
            self.gameState[x, BSTARTROW] = BLACK  # black's starting row
            self.gameState[x, WSTARTROW] = WHITE  # white's starting row
    else:
        self.gameState = state
    self.whoseTurn = player
    self.cachedWin = False  # set to True in winFor() once decided
    self.cachedWinner = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_object(self):\r\n\t\tpass", "def new(self, obj):\n pass", "def create(cls, _):\n return cls", "def create():", "def create():", "def make_object():\n return object()", "def create(cls):\n pass\n return cls()", "def create(self):\n pass", "def create(sel...
[ "0.8279344", "0.81875837", "0.76169944", "0.7532984", "0.7532984", "0.74903953", "0.7413568", "0.7263086", "0.7263086", "0.7263086", "0.7188198", "0.71668696", "0.7105122", "0.70978415", "0.7032688", "0.70286256", "0.69967264", "0.697575", "0.6942721", "0.6913294", "0.6906194...
0.0
-1
Used for debugging and displaying in user friendly manner.
def __repr__(self):
    """Multi-line board dump, one row per line (debug aid)."""
    rows = []
    for y in range(HEIGHT):
        row = "".join(str(self.gameState[x, y]) for x in range(WIDTH))
        rows.append(row + "\n")
    return "".join(rows)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug():", "def output_debug_info(self):", "def debug_string(self):\n\n raise NotImplementedError", "def debug(self):\n raise NotImplementedError", "def debug_print(self):\n print self.title\n print self.storyline\n print self.poster_image_url\n print self.trai...
[ "0.7927461", "0.77589476", "0.74094605", "0.7343519", "0.73350525", "0.7155053", "0.7092897", "0.70533305", "0.70131016", "0.7006562", "0.69903415", "0.69416094", "0.69380265", "0.68714195", "0.6865168", "0.68511516", "0.6811546", "0.68112475", "0.6776409", "0.67534703", "0.6...
0.0
-1
Translate the board description into a string. Used for a hash table.
def __str__(self):
    """Flat string of every square, row by row — usable as a hash key."""
    return "".join(
        str(self.gameState[x, y]) for y in range(HEIGHT) for x in range(WIDTH)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def StringFromBoard(board):\n\trows = []\n\tfor row in board:\n\t\trows.append('|'.join([' '+square+' ' for square in row]))\n\treturn '\\n-----------\\n'.join(rows)", "def __str__(self):\n rep = \"\"\n for row in range(self._dim):\n for col in range(self._dim):\n rep += S...
[ "0.65844256", "0.65822697", "0.642821", "0.6403273", "0.6371313", "0.6342245", "0.63234556", "0.62662345", "0.62411314", "0.623124", "0.62133116", "0.62067544", "0.6201318", "0.6192333", "0.6180038", "0.61786634", "0.616831", "0.6166834", "0.61507493", "0.6119286", "0.6043222...
0.0
-1
Gives a pawn on the position x,y or returns empty if none exists
def getPawn(self, x, y):
    """Return the Pawn at (x, y), or None when the square is empty."""
    square = self.gameState[x, y]
    if square == EMPTY:
        return None
    return Pawn(x, y, square)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_a_pawn(self, player, pos_x, pos_y)-> bool:\n if not pos_x in range(0, len(self.grid)) \\\n or not pos_y in range(0, len(self.grid)):\n return False\n if self.grid[pos_x][pos_y].color is None:\n for index_x in range(-1, 2):\n for index_y in range(-1,...
[ "0.6292175", "0.628745", "0.61434907", "0.61041254", "0.6023498", "0.60125977", "0.59928966", "0.5978027", "0.5957675", "0.59455156", "0.5943654", "0.5890515", "0.58391374", "0.5811578", "0.5807527", "0.57815", "0.57768774", "0.57580405", "0.5754607", "0.57248765", "0.5722037...
0.8193444
0
Check if it's a win for player.
def winFor(self, player):
    """
    Return True if *player* has won on this board.

    A side wins by getting a pawn to the opposite home row.  If no
    moves remain for either player, the side with more pawns wins.
    The outcome is cached so the successor/count checks run once.
    """
    if self.cachedWin == False:
        won = False
        if player == WHITE:
            # white wins by reaching row 0
            for x in range(0, WIDTH):
                if self.gameState[x, 0] == WHITE:
                    won = True
        elif player == BLACK:
            # black wins by reaching the last row
            for x in range(0, WIDTH):
                if self.gameState[x, HEIGHT - 1] == BLACK:
                    won = True
        if len(self.successors()) == 0:  # no available moves for both players
            # check who has the most pawns
            bCount = self.count(BLACK)
            # BUG FIX: was self.count(BLACK) for both counts
            wCount = self.count(WHITE)
            if bCount > wCount:
                self.cachedWin = True
                # BUG FIX: cache the actual winner, not whoever asked
                self.cachedWinner = BLACK
                return player == BLACK
            if wCount > bCount:
                self.cachedWin = True
                self.cachedWinner = WHITE
                return player == WHITE
        if won:
            self.cachedWin = True
            self.cachedWinner = player
            return True
        else:
            return False
    else:
        return player == self.cachedWinner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_won(board, player):\r\n return False", "def has_won(board, player):\n return False", "def checkForWin(self):\n w = self.getWinner()\n if w == PLAYER or w == AI:\n # self.printBoard()\n # print('%d'%w + ' won!')\n return\n if w == Tie:\n ...
[ "0.82665426", "0.8169365", "0.8155496", "0.7858614", "0.78276265", "0.7803459", "0.7798291", "0.77653474", "0.77357554", "0.76523876", "0.7650778", "0.7639162", "0.76185304", "0.7592742", "0.75793433", "0.757934", "0.7569485", "0.75380087", "0.7517126", "0.74795717", "0.74670...
0.796108
3
needed for search. Gets the number of pawns of a given color on the board.
def count(self, color):
    """Number of pawns of the given color on the board (used by search)."""
    return sum(
        1
        for y in range(HEIGHT)
        for x in range(WIDTH)
        if self.gameState[x, y] == color
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_colors(board, color):\n n = 0\n for cell in board:\n if cell == color:\n n += 1\n elif cell == cinv(color):\n n -= 1\n return n", "def sum_color(self, board, color):\n sum_of_color = 0\n for i in range(board.size):\n for j in range(b...
[ "0.71357477", "0.67711115", "0.6714085", "0.6677235", "0.65223455", "0.6421733", "0.63733757", "0.63176304", "0.6308519", "0.6277486", "0.62195385", "0.6203734", "0.6190637", "0.6115216", "0.61102915", "0.6102846", "0.60874075", "0.6083563", "0.60525626", "0.6047749", "0.6038...
0.6866677
1
Tells if a position is in the game bounds
def inBounds(self, pos):
    """True when *pos* lies on the board."""
    return 0 <= pos.x < WIDTH and 0 <= pos.y < HEIGHT
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_bounds(self, position):\n row, col = position\n return ((row >= 0 and row < self.height) and\n (col >= 0 and col < self.width))", "def is_in_bounds(pos):\n return PLAYFIELD_PADDING[0] < pos[0] < PLAYFIELD_PADDING[0] +\\\n BLOCK_NUM_WIDTH * Block.WIDTH and PLA...
[ "0.8121233", "0.78420454", "0.783356", "0.7815986", "0.77785796", "0.77391034", "0.7499602", "0.74738336", "0.7412133", "0.7395001", "0.7287691", "0.7282496", "0.72665375", "0.72362554", "0.7205017", "0.7162476", "0.71581644", "0.71300095", "0.71250474", "0.7091146", "0.70606...
0.8067338
1
Takes a pawn and returns its relative move position
def movePos(self, p, intMove):
    """Board position reached by pawn *p* making relative move *intMove*."""
    # direction of travel depends on the pawn's color
    direction = self.intPlayer(p.color)
    return pos(p.pos.x - (intMove * direction), p.pos.y + direction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_pawn(pos, game):\n #Convert coordinates to row and column\n row = int(pos[1]//(SQUARESIZE+FENCEWIDTH))\n col = int(pos[0]//(SQUARESIZE+FENCEWIDTH))\n #Make move\n game.move_pawn(game.get_player_turn(), (col,row))", "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, co...
[ "0.70455617", "0.68026656", "0.6782019", "0.6533234", "0.6489556", "0.64045006", "0.63270336", "0.6322109", "0.6290889", "0.6268482", "0.6268482", "0.62472296", "0.6150574", "0.6125518", "0.61029345", "0.60926723", "0.6087858", "0.60839784", "0.60775626", "0.6045745", "0.6034...
0.6875913
1
Tells if a move is legal
def legalMove(self, p, intMove):
    """True when pawn *p* may make relative move *intMove* (0=forward, ±1=capture)."""
    target = self.movePos(p, intMove)  # board position of move
    if not self.inBounds(target):
        # can't make a move out of the board bounds
        return False
    if intMove == 0:
        # straight ahead: the target square must be empty
        return self.gameState[target.get()] == EMPTY
    # diagonal: the target square must hold an enemy pawn
    return self.gameState[target.get()] == self.togglePlayer(p.color)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_valid(move):\n return True", "def valid_move(self, player, move):\n return (True)", "def legal_move(self, move, state = None):\n if state is None:\n state = copy(self.state)\n else:\n state = copy(state)\n return state[move // state.shape[0], move %...
[ "0.86168677", "0.78282213", "0.78129864", "0.7796619", "0.7645074", "0.76316094", "0.762242", "0.75282884", "0.74927366", "0.7453383", "0.7452425", "0.73305404", "0.72937834", "0.7274391", "0.7266397", "0.7234366", "0.71924216", "0.7192144", "0.7192", "0.71551216", "0.714581"...
0.69171023
41
needed for search. Gets all legal available moves, including those for the opponent.
def openMoves(self):
    """All legal (pawn, move) pairs on the board, for both players (used by search)."""
    moves = []
    for y in range(HEIGHT):
        for x in range(WIDTH):
            pawn = self.getPawn(x, y)
            if pawn is None:
                continue
            for step in range(-1, 2):
                if self.legalMove(pawn, step):
                    moves.append((pawn, step))
    return moves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_moves(self, board):\n self.available_moves = [move for move in board.legal_moves]", "def get_available_moves(self, board):\n pass", "def get_legal_moves(self):\n # for each square in the castle figure out if an moves can occur from it.\n moves = []\n allowed = [self.t...
[ "0.7495228", "0.7211233", "0.7106526", "0.6994656", "0.68828064", "0.6859793", "0.6845441", "0.6823255", "0.68220854", "0.6766067", "0.6720792", "0.6693552", "0.6690474", "0.6682", "0.6669322", "0.66576886", "0.6649769", "0.66284806", "0.6552528", "0.6523163", "0.6483351", ...
0.6491643
20
needed for search Create a new board state with the given move
def move(self, p, intMove):
    """Return ((pawn, move), successor Board) after applying the move (used by search)."""
    nextState = self.gameState.copy()
    nextState[p.pos.get()] = EMPTY  # vacate the source square
    nextState[self.movePos(p, intMove).get()] = p.color  # occupy the target
    successor = Board(nextState, self.togglePlayer(self.whoseTurn))
    return ((p, intMove), successor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_move(self, move: Any) -> 'StonehengeState':\n if type(move) == str:\n new_state = StonehengeState(not self.p1_turn, self.side_length)\n # copy the board information from current state\n # make copy of current state information\n hori_lst_copy = []\n ...
[ "0.76256883", "0.7569273", "0.7310993", "0.7211619", "0.72101855", "0.7194801", "0.7156599", "0.7146737", "0.7015209", "0.695439", "0.6947784", "0.69166005", "0.68700886", "0.68499726", "0.68240017", "0.68005484", "0.6775084", "0.677143", "0.6759415", "0.6757112", "0.6747905"...
0.6648307
28
A custom Django template tag for encoding URL query string parameters.
def urlparams(*_, **kwargs):
    """
    Template-tag helper: encode keyword arguments as a URL query string.

    None-valued parameters are dropped; positional arguments are
    ignored.  Returns "" when nothing remains to encode.
    """
    params = {key: val for key, val in kwargs.items() if val is not None}
    return f"?{urlencode(params)}" if params else ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return urlencode(query)", "def url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return urlencode(query)", "def encoded_query_str(request):\n ...
[ "0.6936687", "0.6936687", "0.65685886", "0.6343588", "0.62395805", "0.6233473", "0.6176143", "0.61670446", "0.6154101", "0.6131022", "0.60958886", "0.60618484", "0.6031017", "0.6029763", "0.5963693", "0.59559804", "0.59069306", "0.57844955", "0.57651424", "0.5763139", "0.5728...
0.5674797
24
Used by all TalkChannels when logging messages, to preserve message order
def _mc_gen():
    """Yield 1, 2, 3, ... forever (monotonic message-order counter)."""
    counter = 1
    while True:
        yield counter
        counter += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log(self, msg):\n self.telegram_queue.put(f\"{__name__.split('.')[-1]}: {msg}\")", "def log(self, message):", "def _log(self, message):\n pass", "def log_message(self, fmt, *args):\r\n pass\r\n # log_message\r", "def lastMessageReceived():", "def on_message(self, msg):\n ...
[ "0.6550122", "0.63551664", "0.6192797", "0.601406", "0.5984528", "0.5872652", "0.58380395", "0.5811983", "0.5803327", "0.5792493", "0.5775632", "0.57648224", "0.57644266", "0.57573664", "0.57374674", "0.5730767", "0.5729119", "0.5690887", "0.568535", "0.56792635", "0.56616557...
0.0
-1