query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
List of outgoing edges from a vertex.
def get_successors(self, pos: tuple): if self.is_obstacle(pos): return {} else: x, y = pos[0], pos[1] neighbours = [(x + 1, y), (x + 1, y + 1), (x, y + 1), (x - 1, y + 1), (x - 1, y), (x - 1, y - 1), (x, y - 1), (x + 1, y - 1)] return {k: self.move_cost(pos, k) for k in neighbours if self.is_free(k)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def out_edges(self, vertex):\n return self[vertex].values()", "def outgoing_edges(self, vertices, labels=True):\n return list(self.outgoing_edge_iterator(vertices, labels=labels))", "def edges(self):\n es = []\n for vertex1 in self.vertices():\n for vertex2 in self.out_ve...
[ "0.79884607", "0.7752863", "0.7353233", "0.72382283", "0.7054124", "0.6991357", "0.6975038", "0.6930764", "0.6925828", "0.6879445", "0.6827964", "0.67977715", "0.67761004", "0.67647284", "0.67479455", "0.6713683", "0.66829216", "0.6643639", "0.6629576", "0.6628409", "0.662827...
0.0
-1
List of incoming edges to a vertex.
def get_predecessors(self, pos: tuple): return self.get_successors(pos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def incoming_edges(self, vertices, labels=True):\n return list(self.incoming_edge_iterator(vertices, labels=labels))", "def edges(self):\n es = []\n for vertex1 in self.vertices():\n for vertex2 in self.out_vertices(vertex1):\n es.append(self[vertex1][vertex2])\n ...
[ "0.7818218", "0.72931284", "0.7168712", "0.702791", "0.69290024", "0.6920739", "0.6894563", "0.68887043", "0.687522", "0.6836193", "0.6818338", "0.67833453", "0.6775497", "0.67643553", "0.67224413", "0.67204434", "0.66900426", "0.6682848", "0.66743505", "0.66723615", "0.66631...
0.0
-1
This is the main script for the bigmacc process. It iteartes through various CEA and bigmacc operations for each key (i.e. 01011101). It ends by saving a sample of the hourly results across the key for each building in a netcdf and then wiping the project files to reset them for the next iteration.
def run(config): locator = cea.inputlocator.InputLocator(config.scenario) print('Key in run') print(config.bigmacc.key) i = config.bigmacc.key print(i) # SCENARIO SETUP --- config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i) print(config.general.project) cea.datamanagement.data_initializer.main(config) # use the scenario code to set the year for the lca and other operations that need the current year pathway_code = config.general.parent pathway_items = pathway_code.split('_') scenario_year = int(pathway_items[1]) config.emissions.year_to_calculate = scenario_year bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round) scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0') experiment_key = 'exp_{}'.format(i) print(experiment_key) keys = [int(x) for x in str(i)] if experiment_key in scen_check['Experiments'].values.tolist(): print('Experiment was finished previously, moving to next.') pass else: print('START: experiment {}.'.format(i)) # INITIALIZE TIMER --- t0 = time.perf_counter() if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)): print(' - Folder exists for experiment {}.'.format(i)) else: os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i)) print(' - Folder does not exist for experiment {}, creating now.'.format(i)) # run the archetype mapper to leverage the newly loaded typology file and set parameters print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i)) cea.datamanagement.archetypes_mapper.main(config) # run the rule checker to set the scenario parameters print(' - Running rule checker for experiment {}.'.format(i)) cea.bigmacc.bigmacc_rules.main(config) # SIMULATIONS --- print(' - Run radiation is {}.'.format(config.bigmacc.runrad)) print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data)) # checking 
on need for radiation simulation if config.bigmacc.runrad == True: # this nested statement is for when we rerun the simulations and no longer need to run the unique radiation if config.bigmacc.rerun != True: print(' - Running radiation simulation for experiment {}.'.format(i)) if os.path.exists(locator.get_radiation_building('B000')): print(' - Radiation folder exists for experiment {}, copying.'.format(i)) else: print(' - Radiation running for experiment {}.'.format(i)) cea.resources.radiation_daysim.radiation_main.main(config) else: # print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i)) old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data', 'solar-radiation') # distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder()) else: radfiles = config.bigmacc.copyrad # print(' - Copying radiation results from {}.'.format(radfiles)) # distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder()) print(' - Experiment {} does not require new radiation simulation.'.format(i)) # running demand forecasting if os.path.exists(locator.get_schedule_model_file('B000')): print(' - Schedules exist for experiment {}.'.format(i)) else: print(' - Schedule maker running for experiment {}.'.format(i)) schedule_maker.main(config) # check to see if we need to rerun demand or if we can copy if config.bigmacc.rerun != True: print(' - Running demand simulation for experiment {}.'.format(i)) cea.demand.demand_main.main(config) else: if keys[0] == 1: print(' - Running demand simulation for experiment {}.'.format(i)) cea.demand.demand_main.main(config) elif keys[6] == 1: print(' - Running demand simulation for experiment {}.'.format(i)) cea.demand.demand_main.main(config) else: cea.demand.demand_main.main(config) # print(' - Looking for demand results data from previous run for experiment {}.'.format(i)) # old_demand_files = 
os.path.join(config.bigmacc.data, config.general.parent, i, # config.general.scenario_name, 'outputs', 'data', 'demand') # if os.path.exists(old_demand_files): # # print(' - Copy demand results files from previous run of experiment {}.'.format(i)) # # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder()) # pass # else: # print(' - No results found.') # print(' - Running demand simulation for experiment {}.'.format(i)) # cea.demand.demand_main.main(config) if config.bigmacc.pv == True: print(' - Run PV is {}.'.format(config.bigmacc.pv)) if config.bigmacc.rerun == True: print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i)) old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar') if os.path.exists(old_pv_files): # print(' - Copying PV files from previous run of experiment {}.'.format(i)) # distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder()) pass else: print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files)) print(' - Running PV simulation for experiment {}.'.format(i)) photovoltaic.main(config) else: # if PV simulation is needed, run it. print(' - Running PV simulation for experiment {}.'.format(i)) photovoltaic.main(config) print('Run water-body exchange is {}.'.format(config.bigmacc.water)) # if water-body simulation is needed, run it. if config.bigmacc.water == True: print(' - Running water body simulation for experiment {}.'.format(i)) water.main(config) # recalculating the supply split between grid and ng in the websrook DH if keys[4] == 1: print(' - Do not run district heat recalculation.') else: print(' - Run district heat recalculation.') cea.bigmacc.wesbrook_DH.main(config) if keys[7] == 1: print(' - PV use detected. 
Adding PV generation to demand files.') util.write_pv_to_demand(config) else: print(' - No PV use detected.') # running the emissions and costing calculations print(' - Run cost and emissions scripts.') cea.analysis.costs.system_costs.main(config) cea.analysis.lca.main.main(config) # clone out the simulation inputs and outputs directory print(' - Transferring results directory for experiment {}.'.format(i)) new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'inputs') new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data') if config.bigmacc.rerun != True: distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path) distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path) time_elapsed = time.perf_counter() - t0 # save log information log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0') log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i), 'Completed': 'True', 'Experiment Time': '%d.2 seconds' % time_elapsed, 'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True) log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv')) log_df.to_csv(r"C:\Users\justi\Desktop\126logger_backup.csv", ) # write netcdf of hourly_results netcdf_writer.main(config, time='hourly') if config.bigmacc.rerun != True: shutil.rmtree(locator.get_costs_folder()) shutil.rmtree(locator.get_demand_results_folder()) shutil.rmtree(locator.get_lca_emissions_results_folder()) shutil.rmtree(locator.get_solar_radiation_folder()) shutil.rmtree(locator.get_potentials_folder()) else: print(' - Rerun does not require purging of the files.') # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here if keys[0] == 1: cea.datamanagement.data_initializer.main(config) else: pass print('END: experiment {}. 
\n'.format(i))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25...
[ "0.6137783", "0.610961", "0.6097006", "0.6071778", "0.59867096", "0.5944537", "0.59281546", "0.5890876", "0.5888353", "0.58699083", "0.5863766", "0.5852129", "0.58403766", "0.58233374", "0.58191043", "0.5813233", "0.5811937", "0.5777856", "0.5767777", "0.5765127", "0.57320464...
0.6892021
0
Zwraca "x/y" lub "x" dla y=1
def __str__(self): if self.y == 1: return "{}".format(self.x) else: return "{}/{}".format(self.x, self.y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def divide(x, y):\n\n return x / y", "def division(x, y):\n return x / y", "def divide(x, y):\n return x / y", "def divide(x, y):\n return x / y", "def divide(x, y):\n return x / y", "def prop(x,y):\n return 1 / (1/x + 1/y - 1)", "def divide(x, y):\n return round(x / y)", "def div...
[ "0.7029484", "0.701576", "0.69088227", "0.69088227", "0.69088227", "0.6760368", "0.662832", "0.6536117", "0.65148705", "0.6464375", "0.64554894", "0.63766176", "0.62106633", "0.6187397", "0.617965", "0.613983", "0.6095693", "0.6090082", "0.60515857", "0.6038736", "0.5985184",...
0.5700002
43
returns if postcode like
def is_postal_code(elem): return 'post' in elem.attrib['k']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postcode(self):\n return self._postcode", "def postcode(self):\n return self._postcode", "def validate_postcode_format(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n pcd = self.postcodes.replace(' ', '')\n # Th...
[ "0.65805596", "0.65805596", "0.6374579", "0.63254106", "0.6238786", "0.6128216", "0.6051144", "0.60343444", "0.586208", "0.58339965", "0.5811099", "0.57576764", "0.57576764", "0.5754006", "0.5704175", "0.56964314", "0.56823075", "0.56657773", "0.5656511", "0.5651277", "0.5647...
0.7000855
0
Add a journal entry.
def add_entry(self, entry: str) -> None: self.entries.append(f"{self.count}: {entry}") self.count += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add():\n form = forms.JournalForm()\n if form.validate_on_submit():\n models.Journal.create(\n title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n learnt=form.learnt.data,\n resources=form.resources.data)\n ...
[ "0.7191207", "0.7089341", "0.70372695", "0.6757535", "0.66126496", "0.6612139", "0.6573566", "0.6421015", "0.63883895", "0.63381886", "0.6327133", "0.622145", "0.61824554", "0.61513895", "0.60793686", "0.6065312", "0.6048327", "0.5997899", "0.59614867", "0.59608066", "0.59607...
0.62996215
11
Remove journal entry at position `pos`.
def remove_entry(self, pos: int) -> None: del self.entries[pos]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_pos(self, pos):\n yield from self.command('delete {}'.format(pos))\n return True", "def delete(self, pos):\n if self.is_playing() and self.current_position() == pos:\n self.x.playback_stop().wait()\n self.x.playlist_remove_entry(pos).wait()", "def delete_ro...
[ "0.6454446", "0.6282452", "0.6261571", "0.61265475", "0.6009769", "0.59815764", "0.58335143", "0.57944274", "0.5689213", "0.5677288", "0.56020075", "0.55991143", "0.55826235", "0.55457", "0.55305755", "0.55185974", "0.5401358", "0.5388663", "0.5358339", "0.5331406", "0.530921...
0.81650764
0
Get entries stored in this journal.
def get_entries(self) -> Generator[str, None, None]: return (entry for entry in self.entries)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEntries(self):\n return self.entries", "def getEntries(self):\n return self.__entries", "def get_entries(self) -> List[Entry]:\n return list(self.__entries)", "def entries(self):\n return self._entries", "def entries(self):\n return self._entries", "def get_entri...
[ "0.7713826", "0.76981765", "0.7545487", "0.74680614", "0.74680614", "0.74099416", "0.71468115", "0.7117516", "0.70767915", "0.69040763", "0.69031703", "0.66488487", "0.66306955", "0.65737164", "0.65379286", "0.6522505", "0.6514958", "0.6387671", "0.6290385", "0.6206425", "0.6...
0.68382215
11
Save journal entries into a file.
def save(journal: Journal, file: Path) -> None: with open(file, "w") as output: output.writelines(f"{entry}\n" for entry in journal.get_entries())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_txt():\n # open file and append, if it doesn't exist then create it.\n with open('journal_entries.txt', 'a+') as f:\n # .get the input in text widget at the first line, '0th' character, then read until the end\n f.write(\"\\n\" + get_date_time())\n for i in range(len(entries)):\...
[ "0.71133924", "0.70640576", "0.68299794", "0.68087065", "0.6638848", "0.6473705", "0.64459836", "0.61746615", "0.61529505", "0.61082065", "0.6088982", "0.60406435", "0.60406435", "0.5983607", "0.5972086", "0.59578437", "0.5955684", "0.59284395", "0.58410317", "0.58036464", "0...
0.8384327
0
Load journal entries from a file.
def load(journal: Journal, file: Path) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(name):\n jrn_path = build_path(name)\n if not os.path.exists(jrn_path):\n print(f'... journal file \\'{jrn_path}\\' does not exist ...')\n print('... initializing new journal ...')\n with open(jrn_path, 'w') as file:\n pass\n return []\n else:\n print...
[ "0.7394419", "0.64924157", "0.62799805", "0.6213144", "0.6060179", "0.6032827", "0.5937667", "0.5873339", "0.5792897", "0.57746816", "0.57655644", "0.57224065", "0.56214803", "0.5567517", "0.5565053", "0.5565053", "0.5560204", "0.55173564", "0.54818314", "0.54786164", "0.5453...
0.81673855
0
Load journal entries from a URI.
def load_from_web(journal: Journal, uri: str) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(name):\n jrn_path = build_path(name)\n if not os.path.exists(jrn_path):\n print(f'... journal file \\'{jrn_path}\\' does not exist ...')\n print('... initializing new journal ...')\n with open(jrn_path, 'w') as file:\n pass\n return []\n else:\n print...
[ "0.6321398", "0.6246365", "0.5560296", "0.5538123", "0.53121865", "0.5294257", "0.5108225", "0.51056397", "0.50977325", "0.50559926", "0.5041016", "0.49502626", "0.49426818", "0.49289203", "0.49164444", "0.48940632", "0.48866275", "0.48804903", "0.48804903", "0.48515296", "0....
0.75825113
0
If passed two rows start generating collection of forests. Other way, if passed previous generation of collection spawning next generation
def __init__(self, settings, input_row=None, output_row=None, previous_generation=None): self._fullInput = [] self.power = 0 self._forests = [] self._fullOutput = [] self.best_fitness = 0 self.roulet = [] self.settings = settings if input_row and output_row: self._generate(list(input_row), list(output_row)) elif previous_generation: self._next_generation(previous_generation) else: raise Exception('wrong arg"s')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _next_generation(self, previous_generation):\n self._fullInput, self._fullOutput = previous_generation.get_data()\n self.power = self.settings.population_count\n for forest_iteration in range(self.power):\n first, second = previous_generation.selection()\n print 'sele...
[ "0.66333145", "0.62098485", "0.61860573", "0.59795815", "0.5897099", "0.58752435", "0.57888967", "0.56387043", "0.56354517", "0.5580544", "0.55045325", "0.5491345", "0.5488172", "0.5473442", "0.5453452", "0.5444366", "0.5409794", "0.5389145", "0.5388282", "0.5374205", "0.5366...
0.0
-1
Generating number of forests (it's random in some frame).
def _generate(self, input_row, output_row): self._fullInput = input_row self.power = self.settings.population_count self._fullOutput = output_row for one_forest in range(self.power): self._forests.append(OneForest(self.settings, input_row=self._fullInput, full_output=self._fullOutput))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def numberOfNodes( gen ):\n return int( scipy.sum( [ 3.**i for i in range( 1, gen + 2 ) ] ) )", "def set_rf_samples(n):\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n))", "def reset_rf_samples():\n forest._generate_sample_indic...
[ "0.60863966", "0.60740316", "0.6011651", "0.59940726", "0.5975096", "0.59473723", "0.5862948", "0.583307", "0.5804934", "0.5777276", "0.5736017", "0.57318246", "0.57276255", "0.5683988", "0.5664078", "0.56328785", "0.56160444", "0.56090194", "0.55513465", "0.5541817", "0.5539...
0.0
-1
Spawning next generation of collection by selecting n pairs of distinct forests from previous generation and them over.
def _next_generation(self, previous_generation): self._fullInput, self._fullOutput = previous_generation.get_data() self.power = self.settings.population_count for forest_iteration in range(self.power): first, second = previous_generation.selection() print 'selected for crossover ->', first.fitness, second.fitness self._forests.append(OneForest(self.settings, first_forest=first, second_forest=second))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_fun...
[ "0.66252106", "0.61852103", "0.61620736", "0.6136829", "0.61138487", "0.6082674", "0.6079343", "0.6030366", "0.6008962", "0.5985152", "0.5984775", "0.5952923", "0.58702976", "0.5806566", "0.57817847", "0.5779176", "0.57665956", "0.57347685", "0.57222146", "0.5704095", "0.5702...
0.6936438
0
Just outputting private data.
def get_data(self): return self._fullInput, self._fullOutput
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_private(self):\n print('Account Number : ', self.__Account)\n return \"\"", "def _printable(self):\n pass", "def output_data(self):\n pass", "def PrettyPrint(self):\r\n print(self.data)\r\n return", "def print_out():\n pass", "def _print_custom(s...
[ "0.69535846", "0.6887361", "0.66857016", "0.66233796", "0.6584199", "0.6315667", "0.62706643", "0.6250064", "0.6220874", "0.6203634", "0.6198626", "0.6130218", "0.6088508", "0.6044061", "0.603079", "0.603079", "0.602542", "0.60135573", "0.5967384", "0.5955937", "0.5954297", ...
0.0
-1
Executing every forest in collection, activating their networks. By the way collecting data about best fitness function.
def execute(self): process_list = [] forests_queue = Queue(self.power) iterational = 0 print '| |-starting evaluation, training and validation' for one_forest in self._forests: process_list.append( Process(target=main_async_method, args=(forests_queue, copy(one_forest.to_portal()), iterational, self.settings))) iterational += 1 for proc in process_list: proc.start() for proc in process_list: proc.join() for smth in range(forests_queue.qsize()): tmp = forests_queue.get() self._forests[tmp['place']].fitness = tmp['fitness'] fitness_summ = sum(map(lambda forest: forest.fitness, self._forests)) fss = map(lambda x: x.fitness, self._forests) print 'avg = ', str(sum(fss) / len(fss)), 'max = ', max(fss) self.roulet = map(lambda x: x.fitness / fitness_summ, self._forests)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutate(self):\n for forest in self._forests:\n forest.mutate(self._fullInput)", "def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self....
[ "0.66012484", "0.62504995", "0.61932045", "0.6135642", "0.6134355", "0.61263996", "0.6049594", "0.5867963", "0.5865974", "0.5832648", "0.581788", "0.5802029", "0.57808244", "0.5779419", "0.5708533", "0.56901664", "0.5634386", "0.56181246", "0.558044", "0.553923", "0.55354095"...
0.7236054
0
selecting one point in probability gist
def select_by_prob(self): ball = random() stop_sector = 0 for sector in self.roulet: ball -= sector if ball < 0: return stop_sector else: stop_sector += 1 return stop_sector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pick_one(self):\n index = 0\n r = random.random()\n while r >= 0:\n r = r - self.normalised_fitness[index]\n index += 1\n index -= 1\n return self.population[index]", "def selection(probs):\n # pick 2 parents out of this distribution\n t = [i for...
[ "0.65602857", "0.63671464", "0.6342662", "0.6314846", "0.6178516", "0.6008465", "0.5948356", "0.5907734", "0.59043133", "0.58996654", "0.58957005", "0.5890248", "0.58802027", "0.586684", "0.5834397", "0.57978284", "0.57956886", "0.578882", "0.57851636", "0.5766072", "0.574572...
0.0
-1
Selecting distinct pair of forests for crossover. Probability of selecting one forest is as much as that fitness function is better.
def selection(self): first = self.select_by_prob() second = first while self._forests[first] == self._forests[second]: second = self.select_by_prob() return self._forests[first], self._forests[second]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crossoverIndividuals(father, mother, bwsFitnessFunction, highIsGood):\n\n #choose depth of crossover point at random\n crossoverDepth = round(random.uniform(1,father.getDepth()))\n\n #get all subtrees of father and mother at that layer of deepness\n fatherNodesAtLayer = father.getNodesAtDepth(crossoverDept...
[ "0.65794104", "0.6549801", "0.6248398", "0.6199317", "0.6146603", "0.5895406", "0.5845301", "0.5820917", "0.57894474", "0.574185", "0.567809", "0.5662331", "0.56227505", "0.5615345", "0.5599909", "0.55788946", "0.55743223", "0.556973", "0.5551336", "0.5547457", "0.55375916", ...
0.63705814
2
Just mutating every forest in collection.
def mutate(self): for forest in self._forests: forest.mutate(self._fullInput)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unifyPreviewNodes(self):\n\n self.leaves.update(self.forced)\n self.forced = set()", "def update(self):\n map(lambda x: x.update(), self._children.values())", "def update (self) :\n for met in self.gene :\n met(self)", "def update(self, list_of_sets):\n for s in list...
[ "0.59149694", "0.5712289", "0.54862785", "0.54687923", "0.5425373", "0.54242694", "0.5323184", "0.5317251", "0.52714866", "0.5256046", "0.525473", "0.524264", "0.52372867", "0.5176812", "0.51745", "0.5156172", "0.51512945", "0.51447666", "0.50903946", "0.5075218", "0.5069291"...
0.7783784
0
Query al cloud SGL
def query_ensor(sensorURI, fromTime, toTime, valueName): s = f"https://smartgardalake.snap4.eu/ServiceMap/api/v1/?serviceUri={sensorURI}&fromTime={fromTime}&toTime={toTime}&valueName={valueName}" print(s) response = requests.get(s) data = response.json() values = [] try: values = data["realtime"]["results"]["bindings"] except KeyError: print("[WARN] empty dataset") values.reverse() result = { "measuredTime": [], valueName: [], } print(len(values)) for i in range(len(values)): v = values[i] result["measuredTime"].append(v["measuredTime"]["value"]) try: float_measure = float(v[valueName]["value"]) if valueName == "CO2" and float_measure > 2000: result[valueName].append(np.nan) else: result[valueName].append(float_measure) except ValueError: result[valueName].append(np.nan) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query3() :", "def query(self):", "def query(output, query):\n gqlapi = gql.get_api()\n print_output(output, gqlapi.query(query))", "def query(self, query):", "def query(self, **kwargs):", "def soql_query(self, query):\n self.builtin.log(\"Running SOQL Query: {}\".format(query))\n ...
[ "0.6597153", "0.65192515", "0.6470072", "0.6186665", "0.60921586", "0.6054754", "0.6036551", "0.596416", "0.59399474", "0.59136164", "0.5830741", "0.57854694", "0.5752966", "0.57420826", "0.573901", "0.57316273", "0.5722046", "0.57058287", "0.5688149", "0.5679209", "0.5648212...
0.0
-1
Query a SGL di un sensore del traffico Vedi query_ensor() per sensorURI, fromTime e toTime
def get_traffic_sensor_df(sensorURI: str, fromTime: str, toTime: str, resampleFreq: str = None, remove_outliers=False): values = ["count", "sumSpeed"] result = None for v in values: # data = query_ensor(sensorURI, fromTime, toTime, v) data = multiday_query(sensorURI, fromTime, toTime, v) df = pd.DataFrame(data, columns=["measuredTime", v]) df["measuredTime"] = pd.to_datetime(df["measuredTime"]) df.index = df["measuredTime"] del df["measuredTime"] if remove_outliers: z_scores = np.abs(stats.zscore(df)) print(f"Removed outliers: {df.size - df[(z_scores < 3).all(axis=1)].size}") df = df[(z_scores < 3).all(axis=1)] if resampleFreq is not None: df = df.resample(resampleFreq).sum() if result is not None: result = pd.merge_ordered(result, df, left_on="measuredTime", right_on="measuredTime") result.index = result["measuredTime"] del result["measuredTime"] else: result = df # avg speed result["avgSpeed"] = result["sumSpeed"] / result["count"] result.loc[~np.isfinite(result["avgSpeed"]), "avgSpeed"] = np.nan result["avgSpeed"] = result["avgSpeed"].interpolate() return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_ensor(sensorURI, fromTime, toTime, valueName):\n\n s = f\"https://smartgardalake.snap4.eu/ServiceMap/api/v1/?serviceUri={sensorURI}&fromTime={fromTime}&toTime={toTime}&valueName={valueName}\"\n print(s)\n response = requests.get(s)\n data = response.json()\n values = []\n try:\n ...
[ "0.78036326", "0.56300086", "0.56219053", "0.54439676", "0.5414489", "0.5369397", "0.53085774", "0.5199531", "0.5161808", "0.51520276", "0.5134034", "0.5122384", "0.510574", "0.50754833", "0.50372785", "0.5034987", "0.5008669", "0.50082725", "0.5000846", "0.49865463", "0.4974...
0.59702265
1
Leggi dati del traffico da .csv, il file dev'essere nel formato letto da SGL
def read_traffic_sensor_from_csv(path: str) -> pd.DataFrame: df = pd.read_csv(path) df["measuredTime"] = pd.to_datetime(df["measuredTime"]) df.set_index("measuredTime", inplace=True) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def get_data(self, csv_file):\n pass", "def format_porteurs(filepath):\n fieldnames, rows = get_header_rows(filepath)\n\n if \"statut\" in fieldnames:\n fieldnames.append(\"situation_societariat_entrance\")\n fieldnames.append(\"situation_situation\")\n for ...
[ "0.7196465", "0.6494136", "0.64422953", "0.64121443", "0.63839144", "0.634418", "0.63111466", "0.62877053", "0.6264619", "0.624226", "0.6232147", "0.6205612", "0.61853033", "0.6169304", "0.6101325", "0.6096724", "0.6090506", "0.6071633", "0.6044117", "0.5981105", "0.597794", ...
0.0
-1
Run ShRec3D on all data in data directory
def generate_data(out_fname, data_directory): def store_result(duration, loci_number): """ Store result of current timing run """ print(' %ds for %d loci' % (duration, loci_number)) if os.path.isfile(out_fname): with open(out_fname, 'r') as fd: cur = json.load(fd) else: cur = [] with open(out_fname, 'w') as fd: cur.append((loci_number, duration)) json.dump(cur, fd) for fn in os.listdir(data_directory): fname = os.path.join(data_directory, fn) print('Loading "%s"...' % fname, end=' ', flush=True) contacts = np.loadtxt(fname) print('Done') start = time.time() try: apply_shrec3d(contacts) except: print('>>> Some error occured') traceback.print_exc() end = time.time() store_result(end-start, contacts.shape[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_dataset(data: DataSetBase) -> None:\n\n tracks_manager = data.load_tracks_manager()\n reconstructions = data.load_reconstruction()\n\n all_shot_ids = set(tracks_manager.get_shot_ids())\n for r in reconstructions:\n for shot in r.shots.values():\n if shot.id in all_shot_ids:\n ...
[ "0.64618134", "0.608151", "0.59120786", "0.5877814", "0.56381893", "0.5586844", "0.5561171", "0.5528533", "0.5522206", "0.55164164", "0.55111134", "0.5507343", "0.54734784", "0.5462257", "0.54563326", "0.5446462", "0.54454964", "0.54451734", "0.542079", "0.5415783", "0.540939...
0.6006584
2
Store result of current timing run
def store_result(duration, loci_number): print(' %ds for %d loci' % (duration, loci_number)) if os.path.isfile(out_fname): with open(out_fname, 'r') as fd: cur = json.load(fd) else: cur = [] with open(out_fname, 'w') as fd: cur.append((loci_number, duration)) json.dump(cur, fd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_result_from_run(self, result):\n with self._result_lock:\n self._results.append(result)\n\n # A race here isn't problematic. Better not to hold the lock during an\n # is_crash call.\n if not self.last_failing_result and result.is_crash():\n self.last_failing_result = result", "def...
[ "0.68735605", "0.65807796", "0.62916756", "0.6186423", "0.615599", "0.6137281", "0.60909104", "0.6023061", "0.60019714", "0.59586823", "0.594622", "0.5925285", "0.59251946", "0.5902604", "0.5852945", "0.58364284", "0.5809314", "0.5793759", "0.5784017", "0.5747181", "0.572242"...
0.5750923
19
Plot time points given in data file and compare to x3
def plot_data(fname):
    """Plot time points from the given JSON data file against a reference x**3 curve."""
    # Guard clause: nothing to plot without a data file.
    if not os.path.isfile(fname):
        print('No data has been generated yet, aborting...')
        sys.exit(1)

    with open(fname, 'r') as fd:
        points = json.load(fd)

    # x-range spans up to the largest loci count recorded so far.
    largest_loci = max(points, key=lambda e: e[0])[0]
    xs = np.arange(0, largest_loci, 1)
    coeff = .55e-8
    cubic = lambda x: coeff * x**3

    plt.plot(
        *zip(*points),
        label=r'ShRec3D data points',
        linestyle='None', marker='h'
    )
    plt.plot(xs, cubic(xs), label=r'$ %.0e \cdot x^3$' % coeff)

    plt.title(r'Complexity ($\in \Theta\left(x^3\right)$) visualization of ShRec3D')
    plt.xlabel('loci number')
    plt.ylabel('execution time (seconds)')
    plt.legend(loc='best')

    plt.savefig('time_comparison.png', dpi=300, bbox_inches='tight')
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def _figure_3():\n\n dataset_id = 3\n pkl_file = _pkl_file_path(dataset_id)\n with open(pkl_file, 'rb...
[ "0.65714025", "0.64165264", "0.64107704", "0.63405186", "0.631422", "0.6297773", "0.6248544", "0.615319", "0.60817546", "0.607438", "0.60634214", "0.60446906", "0.60240567", "0.601342", "0.6000441", "0.5957452", "0.5925133", "0.59175897", "0.5905955", "0.5905455", "0.5882972"...
0.6902801
0
Generate new data if directory is given, otherwise only try to plot existing data
def main():
    """Generate new data if a directory is given, otherwise only try to plot existing data."""
    results_file = 'shrec_timer.json'
    # A single CLI argument is interpreted as the data directory to time.
    if len(sys.argv) == 2:
        generate_data(results_file, sys.argv[1])
    plot_data(results_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_dir(main):\n try:\n wd = str(main.lineEdit_8.text())\n if wd == '':\n main.msg(\"Error \"+errorPath+\"plot_dir: Must choose directory first\")\n return\n for fi in os.listdir(wd):\n dataPath = os.path.join(wd, fi)\n main.msg(\"Plotting \"...
[ "0.61566585", "0.61496145", "0.59893596", "0.5787162", "0.57870215", "0.5718453", "0.57132596", "0.56776357", "0.56348133", "0.56186616", "0.56014526", "0.5600899", "0.55958813", "0.55899835", "0.5586797", "0.555859", "0.5540925", "0.55300957", "0.552718", "0.55126595", "0.54...
0.5090048
98
Push the item in the front of the deque
def enqueue_front(self, item):
    """Push the item onto the front of the deque."""
    # Slice assignment at position 0 is equivalent to list.insert(0, item).
    self._items[:0] = [item]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push(self, item):\n super().add_item_to_front(item)", "def push_front(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_front()\n self.data_[self.front_]= e#New Front\n self.size_+=1\n # print(\...
[ "0.7934391", "0.79339534", "0.7679568", "0.74891657", "0.74482065", "0.7417094", "0.7404675", "0.7363072", "0.7294415", "0.72747624", "0.7242487", "0.7242487", "0.72093624", "0.71174246", "0.707182", "0.7052566", "0.70389926", "0.70353955", "0.701858", "0.70012", "0.6997899",...
0.8026828
0
Push the item in the end of the deque
def enqueue_rear(self, item):
    """Push the item onto the end of the deque."""
    # In-place concatenation extends the underlying list, like append().
    self._items += [item]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push(self, val):\r\n return self.deque.append(val)", "def push(self, item):\n self._tail_iters.append(iter([item]))", "def push(self, item):\n\t\tself.top+=1;\n\t\tself.arr.insert(self.top, item);", "def push(self, item):\n if len(self._data) == self.MAX_SIZE:\n # full we ...
[ "0.77419525", "0.7716661", "0.75581473", "0.74057865", "0.73719305", "0.7366627", "0.7267218", "0.7221633", "0.7205309", "0.71838725", "0.71715564", "0.716415", "0.716415", "0.71557266", "0.7145821", "0.7130648", "0.7110882", "0.71088755", "0.7106317", "0.7062142", "0.7058204...
0.6802287
55
Pop the item in the front of the deque. Raise IndexError if the deque is empty.
def dequeue_front(self):
    """Pop and return the item at the front of the deque.

    Raises:
        IndexError: if the deque is empty.
    """
    # BUGFIX: narrowed from a bare `except:` — the bare clause converted ANY
    # failure (e.g. AttributeError on a missing _items) into a misleading
    # "deque is empty" IndexError. Only pop()'s own IndexError means empty.
    try:
        return self._items.pop(0)
    except IndexError:
        raise IndexError('The deque is empty') from None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop_front(self):\n # set temp to deque's front for return\n temp = self.front\n # if deque is empty\n if self.size == 0:\n # raise IndexError\n raise IndexError()\n # if deque has one element\n elif self.size == 1:\n # empty the deque c...
[ "0.7918459", "0.7688422", "0.7626735", "0.76226133", "0.7617293", "0.7505443", "0.7500941", "0.7439415", "0.740664", "0.7395839", "0.73582757", "0.73489094", "0.7289233", "0.7263663", "0.7241764", "0.724081", "0.7221747", "0.71959555", "0.7189014", "0.7156972", "0.7149371", ...
0.81932133
0
Pop the item in the end of the deque. Raise IndexError if the deque is empty.
def dequeue_rear(self):
    """Pop and return the item at the end of the deque.

    Raises:
        IndexError: if the deque is empty.
    """
    # BUGFIX: narrowed from a bare `except:` — the bare clause converted ANY
    # failure into a misleading "deque is empty" IndexError. Only pop()'s own
    # IndexError actually means the deque is empty.
    try:
        return self._items.pop()
    except IndexError:
        raise IndexError('The deque is empty') from None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop(self):\r\n try:\r\n return self.pop_from_deque()\r\n except IndexError:\r\n return None", "def pop(self):\n try:\n return self._values.pop()\n except IndexError:\n raise IndexError('Cannot pop from empty deque.')", "def pop(self):\...
[ "0.77415574", "0.76645964", "0.76196545", "0.7619073", "0.74518174", "0.73892117", "0.7347819", "0.73183465", "0.72986156", "0.7289281", "0.7255884", "0.7225652", "0.721948", "0.721712", "0.72019696", "0.71845245", "0.7174988", "0.7146664", "0.7139901", "0.71300745", "0.71008...
0.7715742
1
Return True if the deque is empty.
def is_empty(self):
    """Return True if the deque is empty."""
    # An empty list is falsy, so this mirrors len(...) == 0.
    return not self._items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_empty(self):\n return len(self.deque) == 0", "def empty(self) -> bool:\n return len(self._deque) == 0", "def empty(self) -> bool:\n return self.que == []", "def is_empty(self) -> bool:\n return self.peek(1) == []", "def is_empty(self):\n return len(self.the_queue) ...
[ "0.92402023", "0.9101068", "0.86111414", "0.8478603", "0.8439654", "0.83985174", "0.83917534", "0.83707136", "0.83707136", "0.8296455", "0.8283977", "0.8280236", "0.8280236", "0.8274057", "0.82652426", "0.8263581", "0.8238711", "0.8222698", "0.82093155", "0.82078135", "0.8205...
0.0
-1
Return the length of the deque.
def __len__(self):
    """Return the number of items currently stored in the deque."""
    item_count = len(self._items)
    return item_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\n return len(self.deque)", "def __len__(self):\r\n return len(self.deque)", "def __len__(self) -> int:\n return len(self._data_queue)", "def __len__(self):\n return len(self.cumulative_length)", "def length(self):\n return len(self.container)", "def _...
[ "0.86527514", "0.86125225", "0.74804485", "0.7163367", "0.7139454", "0.7136019", "0.7052766", "0.7052766", "0.70522916", "0.7034566", "0.7034566", "0.70223486", "0.70223486", "0.7014049", "0.7014049", "0.7014049", "0.7014049", "0.70028573", "0.6997984", "0.6995006", "0.698651...
0.0
-1
Return the representation of the deque
def __repr__(self):
    """Return the representation of the deque, front end first."""
    # !r produces repr(self._items), matching the original concatenation.
    return f"Front -> {self._items!r} <- Rear"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n return 'Deque([{0}])'.format(','.join(str(item) for item in self))", "def __repr__(self):\n return 'Deque([{0}])'.format(','.join(str(item) for item in self))", "def __str__(self):\n return str(self.deque)", "def __repr__(self):\n return 'Deque({})'.format(se...
[ "0.82023656", "0.82023656", "0.7984831", "0.7795593", "0.6640371", "0.6613635", "0.65820986", "0.64977366", "0.6457218", "0.64357615", "0.6344762", "0.6335218", "0.63308865", "0.6327256", "0.6327256", "0.6296222", "0.62726325", "0.62721485", "0.6218142", "0.61443806", "0.6133...
0.60730755
29
Returns the full path for a relative path
def relative_path(__file__, path):
    """Return the absolute path for *path*, resolved relative to the directory containing *__file__*."""
    base_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(base_dir, path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_relative_path(path: str):\n return os.path.relpath(path, get_project_root())", "def relative(self, path):\n return re.sub(self.path_regex, '', path).lstrip(os.sep)", "def get_path(relative_path=None):\n\n root_path = os.path.dirname(os.path.dirname(__file__))\n\n if relative_path is Non...
[ "0.8088731", "0.8012601", "0.7939858", "0.7906186", "0.78002477", "0.77519965", "0.77290946", "0.76823074", "0.7662462", "0.76563984", "0.76526093", "0.76337326", "0.7589994", "0.7561121", "0.7560908", "0.7560908", "0.7560908", "0.7560908", "0.7560908", "0.755059", "0.7486504...
0.7933692
3
Returns an array of full paths for a relative path with globs
def expand_path(__file__, path_with_globs):
    """Return a list of full paths matching a glob pattern taken relative to *__file__*'s directory."""
    pattern = relative_path(__file__, path_with_globs)
    return glob.glob(pattern)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_paths(file_path):\n return glob(path.join(file_path, '*'))", "def glob(path: str) -> list[str]:\n fs, relative_path = url_to_fs(path)\n return cast(list[str], fs.glob(relative_path))", "def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.g...
[ "0.78830874", "0.74837524", "0.73281705", "0.7273039", "0.7240019", "0.7209716", "0.7107599", "0.7090277", "0.70357305", "0.69723034", "0.69063663", "0.68546826", "0.68303967", "0.6820884", "0.68082666", "0.6802939", "0.6779365", "0.67275643", "0.6712124", "0.67007935", "0.66...
0.7740084
1
Returns full paths for a series relative paths with globs
def expand_paths(__file__, paths_with_globs):
    """Return full paths for one glob pattern, or a flat list for a series of glob patterns."""
    # A single string pattern is handled directly.
    if isinstance(paths_with_globs, str):
        return expand_path(__file__, paths_with_globs)
    # Expand each pattern lazily, then flatten the per-pattern lists.
    per_pattern = (expand_path(__file__, pattern) for pattern in paths_with_globs)
    return list(itertools.chain.from_iterable(per_pattern))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expand_path(__file__, path_with_globs):\n return glob.glob(relative_path(__file__, path_with_globs))", "def get_paths(file_path):\n return glob(path.join(file_path, '*'))", "def glob_paths(self, name, source, pattern, test_data=()):\n assert isinstance(source, config_types.Path)\n result = self...
[ "0.6839766", "0.66446954", "0.66331553", "0.6385926", "0.6355523", "0.62292236", "0.6208013", "0.60792667", "0.6063598", "0.60258174", "0.6019291", "0.6018035", "0.60155195", "0.60125214", "0.60032433", "0.5999792", "0.5935156", "0.59334236", "0.5899728", "0.5886695", "0.5886...
0.6378503
4
One solution would be to do an inorder traversal and sum the values along the way (or just recursive sum along the tree). => O(N) but in case the range [lo,hi] is small, this is wasteful.
def rangeSumBST(self, root: TreeNode, lo: int, hi: int) -> int:
    """Sum the values of all BST nodes whose value lies in [lo, hi].

    Iterative version: prunes subtrees using the BST ordering — values below
    lo cannot appear in a left subtree, values above hi not in a right one.
    """
    total = 0
    pending = [root]
    while pending:
        node = pending.pop()
        if node is None:
            continue
        if node.val < lo:
            # Everything in node's left subtree is even smaller; skip it.
            pending.append(node.right)
        elif node.val > hi:
            # Everything in node's right subtree is even larger; skip it.
            pending.append(node.left)
        else:
            total += node.val
            pending.append(node.left)
            pending.append(node.right)
    return total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSum(root, level, h):\n if root == None:\n return\n \n h[level] = root.data\n \n getSum(root.left, level+1, h)\n getSum(root.right, level+1, h)", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in...
[ "0.6729964", "0.66551703", "0.64795923", "0.6404012", "0.63082033", "0.6260686", "0.6228508", "0.6228508", "0.6228508", "0.6228508", "0.6228508", "0.61943024", "0.6178861", "0.61783046", "0.61113393", "0.6107871", "0.6049075", "0.604374", "0.6043456", "0.603119", "0.5992261",...
0.7450429
0
Loops over arrays in the arrays_iterator and evaluates the cut_function at the cut_values. Returns a list of efficiencies, passed events/objects, and total events/objects. cut_function is expected to return a tuple (n_pass, n_total) with input (arrays, cut_value).
def get_eff(arrays_iterator, cut_function, cut_values):
    """Evaluate cut_function at each cut value over all arrays, returning efficiencies.

    cut_function(arrays, cut_value) must return a (n_pass, n_total) tuple.
    Returns (eff, n_pass, n_total) as numpy arrays, one entry per cut value;
    eff is 0 wherever the weighted total is 0.
    """
    n_cuts = len(cut_values)
    pass_counts = np.zeros(n_cuts)
    total_counts = np.zeros(n_cuts)

    for arrays, dataset in arrays_iterator:
        weight = dataset.get_weight()
        for i, cut_value in enumerate(cut_values):
            passed, total = cut_function(arrays, cut_value)
            pass_counts[i] += weight * passed
            total_counts[i] += weight * total

    # pass/total, with a safe 0 wherever the denominator is 0.
    eff = np.divide(
        pass_counts, total_counts,
        out=np.zeros_like(pass_counts), where=total_counts != 0
        )
    return eff, pass_counts, total_counts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n ...
[ "0.56539094", "0.52569467", "0.5236992", "0.5231127", "0.5104325", "0.5093013", "0.5085143", "0.5064352", "0.4961732", "0.49305794", "0.49301794", "0.4917983", "0.48857465", "0.4866057", "0.48591626", "0.48007303", "0.47892055", "0.47815204", "0.4772867", "0.47728154", "0.476...
0.77393055
0
Expects a list of signals and a list of bkgs (Dataset objects), and a cut_function and cut_values.
def roccurve(signals, bkgs, cut_function, cut_values):
    """Compute signal and background efficiencies for a ROC curve.

    signals and bkgs are lists of Dataset objects; cut_function/cut_values are
    forwarded to get_eff. Returns the six-tuple
    (eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg).
    """
    sig_result = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)
    bkg_result = get_eff(svjflatanalysis.iterate(bkgs), cut_function, cut_values)
    eff_sig, n_pass_sig, n_total_sig = sig_result
    eff_bkg, n_pass_bkg, n_total_bkg = bkg_result
    return eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n ...
[ "0.55629873", "0.5544333", "0.54654413", "0.53966707", "0.5293295", "0.5175403", "0.5154368", "0.50937045", "0.5059277", "0.5027049", "0.50096345", "0.49881732", "0.49795693", "0.4978986", "0.49496424", "0.4933744", "0.4921186", "0.49105307", "0.49013457", "0.48807377", "0.48...
0.6186934
0
Basic plotting style for a single roccurve, based on multiple signal and bkgs samples. Expects an ax object to be given, this function is not standalone
def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):
    """Draw a single ROC curve on the given ax (not standalone; ax is required).

    Returns the drawn line object from _draw_roccurve.
    """
    # Only the two efficiency arrays are needed for drawing; the pass/total
    # counts returned by roccurve are discarded here.
    result = roccurve(signals, bkgs, cut_function, cut_values)
    eff_sig, eff_bkg = result[0], result[1]
    return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Plot the base line\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='...
[ "0.7304228", "0.6550803", "0.6549476", "0.63300985", "0.631119", "0.6244181", "0.614794", "0.61184627", "0.611656", "0.6109644", "0.60801786", "0.6004628", "0.59819883", "0.595841", "0.5869724", "0.58628213", "0.5857794", "0.5838128", "0.5826333", "0.58193934", "0.5785737", ...
0.6976897
1
Main routine for plotting a single roccurve
def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):
    """Main routine for plotting a single ROC curve; creates an 8x8 figure if no ax is given."""
    if ax is None:
        import matplotlib.pyplot as plt
        ax = plt.figure(figsize=(8,8)).gca()
    # Diagonal base line (random-classifier reference).
    ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')
    # The single ROC curve, labelled by the first background's category.
    curve = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)
    curve.set_label(bkgs[0].get_category())
    # Axis cosmetics.
    ax.set_xlim(0.0, 1.05)
    ax.set_ylim(0.0, 1.05)
    ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)
    ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)
    ax.legend(fontsize=DEFAULT_FONTSIZE)
    return ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)", "def roc_curve(model, X_train, y_train, X_test, y_test, train=...
[ "0.7373159", "0.7010362", "0.6923584", "0.6742388", "0.66598225", "0.65181124", "0.6510751", "0.6500974", "0.64510316", "0.6430082", "0.6429037", "0.6416149", "0.63878846", "0.63805693", "0.6365573", "0.63452685", "0.63356173", "0.6325078", "0.6313239", "0.6286145", "0.628332...
0.7444382
0
Plots the roccurve per background category. Assumes signals are all datasets of the same signal.
def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):
    """Plots one ROC curve per background category on a single ax.

    Assumes `signals` are all datasets of the same signal, so the signal
    efficiency is computed once and reused against every background category.
    Returns the ax.
    """
    # Get a default ax if none is given
    if ax is None:
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(8,8))
        ax = fig.gca()
    # Get signal efficiency once
    eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)
    # Basic plotting setup: random-classifier diagonal and axis cosmetics.
    ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')
    ax.set_xlim(0.0, 1.05)
    ax.set_ylim(0.0, 1.05)
    ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)
    ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)
    # Efficiencies per bkg category (ttjets, qcd, ...)
    bkg_categories = sorted(set(b.get_category() for b in bkgs))
    lines = {}
    for bkg_cat in bkg_categories:
        # Datasets belonging to this category.
        bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]
        eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)
        line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)
        line.set_label(bkg_cat)
        # Keep the line around for potential outputting/modifying.
        lines[bkg_cat] = line
    # BUGFIX: the legend must be drawn AFTER the labelled curves exist. The
    # original called ax.legend() before any labelled artist was added, so the
    # per-category labels were never displayed.
    ax.legend(fontsize=DEFAULT_FONTSIZE)
    return ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n...
[ "0.6300367", "0.61281216", "0.6091596", "0.60315055", "0.59216946", "0.5900686", "0.5838783", "0.5722116", "0.55689114", "0.5540599", "0.5525509", "0.5439819", "0.54131126", "0.540916", "0.5403627", "0.53976256", "0.53795195", "0.53759587", "0.53333044", "0.53327256", "0.5321...
0.6172026
1
Fills a coffea.hist.Hist for a single distribution. Takes a list of Dataset objects, and a function `get_array` that should return a numpylike array when given an arrays object. Also requires a string `name` to know in which hist to fill it
def hist_single_distribution(
    arrays_iterator,
    get_array,
    varname='somevar', vartitle=None,
    distrname='somedistr', distrtitle=None,
    hist=None,
    left=-1., right=1., nbins=50
    ):
    """Fills a coffea.hist.Hist for a single distribution.

    Takes an iterator yielding (arrays, Dataset) pairs and a function
    `get_array` that returns a numpy-like array when given an arrays object.
    `varname` selects which hist axis to fill; a new Hist with `nbins` bins in
    [left, right] is created when `hist` is None. Returns the filled hist.
    """
    if hist is None:
        import coffea.hist
        vartitle = varname if vartitle is None else vartitle
        hist = coffea.hist.Hist(
            "Count",
            coffea.hist.Bin(varname, vartitle, nbins, left, right),
            coffea.hist.Cat('label', varname),
            )
    for arrays, dataset in arrays_iterator:
        # BUGFIX: removed a leftover debug print(dataset.get_weight(), get_array(arrays))
        # that spammed stdout (and evaluated get_array twice) on every chunk.
        hist.fill(label=distrname, weight=dataset.get_weight(), **{varname: get_array(arrays)})
    return hist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Fill(self, *args, **kwargs):\n self._varexp = kwargs.get(\"varexp\")\n self._cuts = kwargs.get(\"cuts\", [])\n self._weight = kwargs.get(\"weight\", \"1\")\n if len(args) == 1 and isinstance(args[0], (str, unicode)):\n IOManager.FillHistogram(self, args[0], **kwargs)\n ...
[ "0.6048329", "0.59464675", "0.5933661", "0.5887746", "0.58725816", "0.57364476", "0.5714648", "0.5633713", "0.56145376", "0.5604098", "0.5588111", "0.5546958", "0.554628", "0.55081207", "0.549454", "0.54831433", "0.5475002", "0.5430655", "0.54043436", "0.53916496", "0.5367704...
0.73073107
0
Decorator for post-trigger cuts
def apply_trigger_first(cut_fn):
    """Decorator for post-trigger cuts: applies the 2018 trigger + jetpt550
    selection to the arrays before invoking the wrapped cut function.

    Uses functools.wraps so the wrapper keeps cut_fn's __name__ and attribute
    dict (e.g. .left/.right/.operator), which sibling helpers like get_title
    read off the cut function.
    """
    import functools

    @functools.wraps(cut_fn)
    def wrapped(arrays, cut):
        arrays = svjflatanalysis.arrayutils.apply_trigger_and_jetpt550(arrays, 2018)
        return cut_fn(arrays, cut)
    return wrapped
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post_hooks(self):", "def onCut(self):\n pass", "def triggered(self, *args, **kwargs): # real signature unknown\n pass", "def __call__(self, trigger, type, event):", "def post_run_func_checked(driver: HammerDriver) -> None:\n if post_run_func is not None:\n post_...
[ "0.58980626", "0.5877055", "0.5827994", "0.5693356", "0.5606729", "0.55755043", "0.55027366", "0.5471056", "0.5466376", "0.5457004", "0.5404508", "0.5376659", "0.5318747", "0.53063715", "0.5298729", "0.5287767", "0.52239275", "0.5207728", "0.5186563", "0.51724184", "0.5144093...
0.5477666
7
Takes a cut function and tries to return a title for it
def get_title(fn):
    """Takes a cut function and tries to build a human-readable title for it."""
    # Prefer an explicit .name attribute over the function's own __name__.
    raw = getattr(fn, 'name', None) or fn.__name__
    raw = raw.replace('_cut_function','')
    raw = raw.replace('JetsAK15_subleading_', '').replace('subleading_', '')
    suffix = []
    # A .left attribute marks a windowed cut; render the mt window as a suffix.
    if hasattr(fn, 'left'):
        mt_title = svjflatanalysis.utils.get_title('mt')
        suffix.append('({:.0f} < {} < {:.0f})'.format(fn.left, mt_title, fn.right))
    # Transform the variable name into a display title.
    title = svjflatanalysis.utils.get_title(raw)
    if hasattr(fn, 'operator'):
        title += ' ' + fn.operator + ' cut'
    return title + ' ' + ' '.join(suffix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_title():", "def make_title(words):", "def getTitle(test:str) -> str:\n return test[5:].strip()", "def PROPER(text):\n return text.title()", "def test_title(names):", "def title(value):\r\n title_word = lambda w: w if RE_UPPERCASE.search(w) else old_title(w)\r\n return re.sub('(\\S+)', l...
[ "0.70298654", "0.6522897", "0.6284885", "0.61326414", "0.60484356", "0.6029634", "0.6029412", "0.6012764", "0.59930116", "0.598101", "0.5968265", "0.59671205", "0.5940044", "0.59055644", "0.58885586", "0.5842299", "0.5840335", "0.58212703", "0.58197415", "0.5815073", "0.57954...
0.78655964
0
The Windows version of base.processInterrupt Note! This doesn't work terribly well with a lot of processes.
def processInterrupt(uPid):
    """
    The Windows version of base.processInterrupt

    Sends CTRL_BREAK to the process group identified by uPid; returns True on
    success, False on failure (logged).

    Note! This doesn't work terribly well with a lot of processes.
    """
    try:
        # pylint: disable=no-member
        win32console.GenerateConsoleCtrlEvent(win32con.CTRL_BREAK_EVENT, uPid);
        fRc = True;
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being logged and swallowed.
        reporter.logXcpt('uPid=%s' % (uPid,));
        fRc = False;
    return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_process_interrupted(exc: \"KeyboardInterrupt\"):\n _print(f\"\\nInterrupted. {exc}\")", "def stop(self):\n # trying this instead of SIGTERM\n # http://stackoverflow.com/a/6659191/3380530\n # self._process.send_signal(SIGINT)\n # Or not. SIGINT doesn't exist on Windows\n ...
[ "0.63215697", "0.5775936", "0.57586217", "0.5744429", "0.5740155", "0.5561683", "0.5558111", "0.5497653", "0.5479278", "0.54543006", "0.54173666", "0.541069", "0.5367018", "0.53545386", "0.5337776", "0.53182185", "0.5305628", "0.52493083", "0.522078", "0.5160787", "0.51373094...
0.67192686
0
Posts a WM_CLOSE message to the specified thread.
def postThreadMesssageClose(uTid):
    """
    Posts a WM_CLOSE message to the specified thread.

    Returns True on success, False on failure (logged).
    """
    try:
        win32api.PostThreadMessage(uTid, win32con.WM_CLOSE, 0, 0); # pylint: disable=no-member
    except:
        reporter.logXcpt('uTid=%s' % (uTid,));
        return False;
    return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postThreadMesssageQuit(uTid):\n fRc = False;\n try:\n win32api.PostThreadMessage(uTid, win32con.WM_QUIT, 0x40010004, 0); # DBG_TERMINATE_PROCESS # pylint: disable=no-member\n fRc = True;\n except:\n reporter.logXcpt('uTid=%s' % (uTid,));\n return fRc;", "def close(self):\n\n...
[ "0.65258104", "0.647808", "0.6194087", "0.6059737", "0.6028249", "0.5981587", "0.59607416", "0.5957837", "0.5957837", "0.5957837", "0.5957837", "0.5846168", "0.58057123", "0.57886297", "0.574851", "0.5722326", "0.5718076", "0.57172066", "0.5709524", "0.5701404", "0.5693792", ...
0.7335398
0
Posts a WM_QUIT message to the specified thread.
def postThreadMesssageQuit(uTid):
    """
    Posts a WM_QUIT message to the specified thread.

    Returns True on success, False on failure (logged).
    """
    try:
        # wParam 0x40010004 = DBG_TERMINATE_PROCESS.
        win32api.PostThreadMessage(uTid, win32con.WM_QUIT, 0x40010004, 0); # pylint: disable=no-member
    except:
        reporter.logXcpt('uTid=%s' % (uTid,));
        return False;
    return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postThreadMesssageClose(uTid):\n fRc = False;\n try:\n win32api.PostThreadMessage(uTid, win32con.WM_CLOSE, 0, 0); # pylint: disable=no-member\n fRc = True;\n except:\n reporter.logXcpt('uTid=%s' % (uTid,));\n return fRc;", "def quit(self, mess...
[ "0.6247474", "0.6094396", "0.58557373", "0.57782835", "0.57772505", "0.5738489", "0.5506181", "0.5447505", "0.5402179", "0.5354124", "0.5341512", "0.5266031", "0.5251934", "0.52516365", "0.52507806", "0.5238659", "0.5238613", "0.5237029", "0.5236302", "0.5228808", "0.522184",...
0.7239463
0
The Windows version of base.processTerminate
def processTerminate(uPid):
    """
    The Windows version of base.processTerminate

    Opens the process with TERMINATE rights and terminates it; returns True on
    success, False on failure (logged).
    """
    # pylint: disable=no-member
    try:
        hProcess = win32api.OpenProcess(win32con.PROCESS_TERMINATE, False, uPid);
    except:
        reporter.logXcpt('uPid=%s' % (uPid,));
        return False;

    fRc = False;
    try:
        # Exit code 0x40010004 = DBG_TERMINATE_PROCESS.
        win32process.TerminateProcess(hProcess, 0x40010004);
        fRc = True;
    except:
        reporter.logXcpt('uPid=%s' % (uPid,));
    # The handle is closed whether or not the terminate call succeeded.
    hProcess.Close(); #win32api.CloseHandle(hProcess)
    return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def terminate(process):\n\n def terminate_win(process):\n import win32process\n return win32process.TerminateProcess(process._handle, -1)\n\n def terminate_nix(process):\n import os\n import signal\n return os.kill(process.pid, signal.SIGTERM)\n\n terminate_default = ter...
[ "0.7198852", "0.6692012", "0.65206206", "0.65205437", "0.63315195", "0.6170306", "0.6133239", "0.60618556", "0.6033226", "0.60128194", "0.5967718", "0.5818627", "0.5798409", "0.57777244", "0.57758033", "0.5771237", "0.57694376", "0.57538503", "0.57486963", "0.5722882", "0.571...
0.62610066
5
The Windows version of base.processKill
def processKill(uPid):
    """
    The Windows version of base.processKill

    There is no gentler "kill" primitive used here; terminating the process is
    the whole implementation, so simply delegate to processTerminate.
    """
    return processTerminate(uPid);
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill(self):\r\n try:\r\n if self.process:\r\n self.process.kill()\r\n self.process.wait()\r\n except WindowsError:\r\n # kill may not be available under windows environment\r\n pass", "def _KillProcess(self, pid):\n if sys.platfo...
[ "0.726238", "0.6810974", "0.6760341", "0.6577071", "0.63967526", "0.63236195", "0.63068575", "0.6289544", "0.6275234", "0.6238362", "0.62296176", "0.6153977", "0.6149264", "0.6146983", "0.6144532", "0.61424387", "0.6141811", "0.60982275", "0.60788274", "0.6070093", "0.6050515...
0.74643934
0
The Windows version of base.processExists
def processExists(uPid):
    """
    The Windows version of base.processExists

    Returns True when the process could be opened (it exists), False when
    OpenProcess reports an invalid PID or some other error.
    """
    # We try open the process for waiting since this is generally only forbidden in a very few cases.
    try:
        hProcess = win32api.OpenProcess(win32con.SYNCHRONIZE, False, uPid); # pylint: disable=no-member
    except pywintypes.error as oXcpt: # pylint: disable=no-member
        # ERROR_INVALID_PARAMETER: no such PID.
        if oXcpt.winerror == winerror.ERROR_INVALID_PARAMETER:
            return False;
        # Any other error except ACCESS_DENIED counts as "does not exist".
        if oXcpt.winerror != winerror.ERROR_ACCESS_DENIED:
            reporter.logXcpt('uPid=%s oXcpt=%s' % (uPid, oXcpt));
            return False;
        # NOTE(review): on ERROR_ACCESS_DENIED control falls out of the
        # try/except and the function implicitly returns None (falsy), even
        # though an access-denied open usually means the process DOES exist —
        # confirm whether an explicit `return True` was intended here.
        reporter.logXcpt('uPid=%s oXcpt=%s' % (uPid, oXcpt));
    except Exception as oXcpt:
        reporter.logXcpt('uPid=%s' % (uPid,));
        return False;
    else:
        hProcess.Close(); #win32api.CloseHandle(hProcess)
        return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def win():\n if platform.system() in WINDOWS:\n return True\n return False", "def is_windows():\n if os.name == \"nt\":\n return True\n return False", "def _on_windows() -> bool:\n return os.name == \"nt\"", "def is_windows():\n return os.name == \"nt\"", "def os...
[ "0.68071175", "0.6609007", "0.6606889", "0.6557855", "0.65289325", "0.65233135", "0.6510651", "0.649545", "0.64642906", "0.64525825", "0.63975483", "0.6390339", "0.6355812", "0.63475555", "0.6226767", "0.6114815", "0.61133957", "0.6084441", "0.6084441", "0.6084441", "0.605266...
0.6012618
23
The Windows version of base.processCheckPidAndName
def processCheckPidAndName(uPid, sName):
    """
    The Windows version of base.processCheckPidAndName

    Checks that the process uPid exists and, via WMI, that its image name
    matches sName (case-insensitively, with or without an '.exe' suffix).
    """
    fRc = processExists(uPid);
    if fRc is True:
        try:
            # Look the process up through WMI to get its image name.
            from win32com.client import GetObject; # pylint: disable=F0401
            oWmi = GetObject('winmgmts:');
            aoProcesses = oWmi.InstancesOf('Win32_Process');
            for oProcess in aoProcesses:
                # NOTE(review): `long` is Python-2 only; this block predates py3.
                if long(oProcess.Properties_("ProcessId").Value) == uPid:
                    sCurName = oProcess.Properties_("Name").Value;
                    reporter.log2('uPid=%s sName=%s sCurName=%s' % (uPid, sName, sCurName));
                    sName = sName.lower();
                    sCurName = sCurName.lower();
                    # Compare basenames when the caller passed a bare name.
                    if os.path.basename(sName) == sName:
                        sCurName = os.path.basename(sCurName);
                    # Accept a match with or without the '.exe' suffix on either side.
                    if sCurName == sName \
                      or sCurName + '.exe' == sName \
                      or sCurName == sName + '.exe':
                        # NOTE(review): fRc is already True here (and is never
                        # cleared on a name MISMATCH), so a wrong name still
                        # yields True — presumably best-effort; confirm intent.
                        fRc = True;
                        break;
        except:
            # Best effort: a WMI failure leaves fRc as processExists() reported.
            reporter.logXcpt('uPid=%s sName=%s' % (uPid, sName));
    return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exe_match(expected_name):\n # expected_name = expected_name.encode('ascii')\n def f(win):\n n = conv(win.process_name)\n return n == expected_name\n return f", "def check_process_for_pid(pid, process_name):\n pid = int(pid)\n proc = psutil.Process(pid)\n return proc.name() == ...
[ "0.6427718", "0.627324", "0.6176712", "0.6168791", "0.61007553", "0.6070687", "0.60217047", "0.60116947", "0.5992538", "0.5970814", "0.59083015", "0.5876524", "0.58620656", "0.5821407", "0.5805197", "0.57778066", "0.5776582", "0.5765953", "0.57581586", "0.57557404", "0.573748...
0.71424156
0
Returns a (pid, handle, tid) tuple on success. (-1, None, -1) on failure (logged).
def processCreate(sName, asArgs):
    """
    Returns a (pid, handle, tid) tuple on success. (-1, None, -1) on failure (logged).
    """
    # Construct a command line.
    # NOTE(review): each argument is wrapped in double quotes but embedded
    # quotes are not escaped — confirm arguments never contain '"'.
    sCmdLine = '';
    for sArg in asArgs:
        if sCmdLine == '':
            sCmdLine += '"';
        else:
            sCmdLine += ' "';
        sCmdLine += sArg;
        sCmdLine += '"';

    # Try start the process.
    # pylint: disable=no-member
    dwCreationFlags = win32con.CREATE_NEW_PROCESS_GROUP;
    oStartupInfo = win32process.STARTUPINFO();
    try:
        (hProcess, hThread, uPid, uTid) = win32process.CreateProcess(sName,
            sCmdLine,           # CommandLine
            None,               # ProcessAttributes
            None,               # ThreadAttibutes
            1,                  # fInheritHandles
            dwCreationFlags,
            None,               # Environment
            None,               # CurrentDirectory.
            oStartupInfo);
    except:
        reporter.logXcpt('sName="%s" sCmdLine="%s"' % (sName, sCmdLine));
        return (-1, None, -1);

    # Dispense with the thread handle.
    try:
        hThread.Close(); # win32api.CloseHandle(hThread);
    except:
        reporter.logXcpt();

    # Try get full access to the process.
    # (Failure here is non-fatal: we fall back to the original handle.)
    try:
        hProcessFullAccess = win32api.DuplicateHandle(
            win32api.GetCurrentProcess(),
            hProcess,
            win32api.GetCurrentProcess(),
            win32con.PROCESS_TERMINATE
            | win32con.PROCESS_QUERY_INFORMATION
            | win32con.SYNCHRONIZE
            | win32con.DELETE,
            False,
            0);
        hProcess.Close(); # win32api.CloseHandle(hProcess);
        hProcess = hProcessFullAccess;
    except:
        reporter.logXcpt();
    reporter.log2('processCreate -> %#x, hProcess=%#x' % (uPid, hProcess,));
    return (uPid, hProcess, uTid);
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pid_tid():\n # noinspection PyBroadException\n try:\n return \"(pid=%s) (tid=%s)\" % (\n six.text_type(os.getpid()),\n six.text_type(six.moves._thread.get_ident()),\n )\n except Exception:\n return \"(pid=%s) (tid=Unknown)\" % (six.text_type(os.getpid()))...
[ "0.62232584", "0.60516447", "0.5674809", "0.5673514", "0.55722266", "0.5526226", "0.548995", "0.548995", "0.5366021", "0.53371155", "0.53033525", "0.52723205", "0.5250137", "0.5207038", "0.5207038", "0.51987016", "0.5190687", "0.51559055", "0.5148344", "0.5127615", "0.5126017...
0.0
-1
Polls the process handle to see if it has finished (True) or not (False).
def processPollByHandle(hProcess): try: dwWait = win32event.WaitForSingleObject(hProcess, 0); # pylint: disable=no-member except: reporter.logXcpt('hProcess=%s %#x' % (hProcess, hProcess,)); return True; return dwWait != win32con.WAIT_TIMEOUT; #0x102; #
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_done(self) -> bool:\n is_done = self._process.poll() is not None\n\n return is_done", "def check_finish(self):\r\n return not self.proc.is_alive()", "def _proc_is_alive(self):\n if self._proc is None:\n return False\n\n return self._proc.poll() is None", "...
[ "0.7556547", "0.7349174", "0.7095015", "0.70851445", "0.6985076", "0.6930272", "0.68558294", "0.6798407", "0.6784719", "0.6780242", "0.6755652", "0.66265666", "0.6621324", "0.658647", "0.65851134", "0.65569645", "0.6533104", "0.6499808", "0.64819217", "0.6480319", "0.6417932"...
0.7235497
2
Logs windows memory stats.
def logMemoryStats(): class MemoryStatusEx(ctypes.Structure): """ MEMORYSTATUSEX """ kaFields = [ ( 'dwLength', ctypes.c_ulong ), ( 'dwMemoryLoad', ctypes.c_ulong ), ( 'ullTotalPhys', ctypes.c_ulonglong ), ( 'ullAvailPhys', ctypes.c_ulonglong ), ( 'ullTotalPageFile', ctypes.c_ulonglong ), ( 'ullAvailPageFile', ctypes.c_ulonglong ), ( 'ullTotalVirtual', ctypes.c_ulonglong ), ( 'ullAvailVirtual', ctypes.c_ulonglong ), ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ), ]; _fields_ = kaFields; # pylint: disable=invalid-name def __init__(self): super(MemoryStatusEx, self).__init__(); self.dwLength = ctypes.sizeof(self); try: oStats = MemoryStatusEx(); ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats)); except: reporter.logXcpt(); return False; reporter.log('Memory statistics:'); for sField, _ in MemoryStatusEx.kaFields: reporter.log(' %32s: %s' % (sField, getattr(oStats, sField))); return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_memory_stats(location_tag=\"undef\"):\n try:\n import psutil\n p = psutil.Process(os.getpid())\n rm, vm = p.get_memory_info()\n print \"MEM_STAT (%s) rm=%s, vm=%s\" % (location_tag, rm, vm)\n except ImportError:\n print \"psutil module not available\"", "def log...
[ "0.70418596", "0.6867534", "0.643289", "0.6419121", "0.6389766", "0.6379068", "0.59949344", "0.5943068", "0.5942947", "0.5925752", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.59162056", "0.58963734", "0.5882967", "0.5870274", ...
0.798583
0
Calls HeapValidate(GetProcessHeap(), 0, NULL);
def checkProcessHeap(): # Get the process heap. try: hHeap = ctypes.windll.kernel32.GetProcessHeap(); except: reporter.logXcpt(); return False; # Check it. try: fIsOkay = ctypes.windll.kernel32.HeapValidate(hHeap, 0, None); except: reporter.logXcpt(); return False; if fIsOkay == 0: reporter.log('HeapValidate failed!'); # Try trigger a dump using c:\utils\procdump64.exe. from common import utils; iPid = os.getpid(); asArgs = [ 'e:\\utils\\procdump64.exe', '-ma', '%s' % (iPid,), 'c:\\CrashDumps\\python.exe-%u-heap.dmp' % (iPid,)]; if utils.getHostArch() != 'amd64': asArgs[0] = 'c:\\utils\\procdump.exe' reporter.log('Trying to dump this process using: %s' % (asArgs,)); utils.processCall(asArgs); # Generate a crash exception. ctypes.windll.msvcrt.strcpy(None, None, 1024); return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def precheck(self):\n if self.__memory_size is None:\n self.logger.exception(\"[Memory] Please set memory size.\")\n raise ArgsNotCorrect(\"Please set memory size.\")", "def test_func_heap(self):\n cmd = \"deref $_heap()\"\n target = _target(\"heap\")\n self.asse...
[ "0.5956563", "0.5820966", "0.5391256", "0.5279991", "0.52643716", "0.52414745", "0.5221388", "0.51908994", "0.5157347", "0.51529896", "0.5116927", "0.50405", "0.50393564", "0.5023326", "0.5012351", "0.5010514", "0.5008411", "0.50031483", "0.4997377", "0.49946463", "0.49777916...
0.76484793
0
Gets the process tree using Inductive Miner DirectlyFollows
def apply(log, parameters=None): if parameters is None: parameters = {} decreasingFactor = parameters[ "decreasingFactor"] if "decreasingFactor" in parameters else constants.DEFAULT_DEC_FACTOR activity_key = parameters[pm4_constants.PARAMETER_CONSTANT_ACTIVITY_KEY] if pm4_constants.PARAMETER_CONSTANT_ACTIVITY_KEY in parameters else xes.DEFAULT_NAME_KEY log = attributes_filter.filter_log_on_max_no_activities(log, max_no_activities=constants.MAX_NO_ACTIVITIES, parameters=parameters) filtered_log = auto_filter.apply_auto_filter(log, parameters=parameters) activities_count = attributes_filter.get_attribute_values(filtered_log, activity_key) activities = list(activities_count.keys()) start_activities = list(start_activities_filter.get_start_activities(filtered_log, parameters=parameters).keys()) end_activities = list(end_activities_filter.get_end_activities(filtered_log, parameters=parameters).keys()) dfg = dfg_factory.apply(filtered_log, parameters=parameters) dfg = clean_dfg_based_on_noise_thresh(dfg, activities, decreasingFactor * constants.DEFAULT_DFG_CLEAN_MULTIPLIER, parameters=parameters) tree = inductive_miner.apply_tree_dfg(dfg, parameters=parameters, activities=activities, start_activities=start_activities, end_activities=end_activities) parameters["format"] = "svg" gviz = pt_vis_factory.apply(tree, parameters=parameters) gviz_base64 = base64.b64encode(str(gviz).encode('utf-8')) return get_base64_from_gviz(gviz), None, "", "xes", activities, start_activities, end_activities, gviz_base64, [], "tree", "freq", None, "", activity_key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prog_nodes(self):\n\n self.arbor._grow_tree(self)\n my_node = self\n while my_node is not None:\n yield my_node\n ancestors = list(my_node.ancestors)\n if ancestors:\n my_node = my_node.arbor.selector(ancestors)\n else:\n ...
[ "0.58344984", "0.5687344", "0.5568197", "0.5373498", "0.536982", "0.5367479", "0.5362519", "0.53463984", "0.5334048", "0.53192425", "0.5254958", "0.5220183", "0.5207111", "0.5187977", "0.5185732", "0.5169036", "0.5158708", "0.5143826", "0.5143826", "0.5135224", "0.5132148", ...
0.0
-1
Runs the component. The Annual Total Savings,Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit Cost Ratio, Levelized Cost of Energy, and Internal Rate of Return will all be calculated. There must be a known Heat Recovery project for this component to run.
def run (self, scalers = {'capital costs':1.0}): self.was_run = True self.reason = "OK" tag = self.cd['file id'].split('+') if len(tag) > 1 and tag[1] != 'transmission': self.was_run = False self.reason = "Not a transmission project." return if not self.cd["model electricity"]: self.was_run = False self.reason = "Electricity must be modeled to analyze "+\ "transmission. It was not for this community." return if np.isnan(float(self.comp_specs['distance to community'])): self.was_run = False self.reason = ("There are no communities within 30 miles with" " lower cost of electricity.") return self.calc_average_load() try: self.get_intertie_values() except ValueError: self.was_run = False self.reason = ("Could not find data on community to intertie to.") return self.calc_pre_intertie_generation() self.calc_intertie_offset_generation() if self.cd["model heating fuel"]: # change these below self.calc_lost_heat_recovery() # see NOTE* #~ return if self.cd["model financial"]: # AnnualSavings functions (don't need to write) self.get_diesel_prices() # change these below self.calc_capital_costs() self.calc_annual_electric_savings() self.calc_annual_heating_savings() # AnnualSavings functions (don't need to write) self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd["current year"]) #~ print self.benefit_cost_ratio self.calc_levelized_costs(self.proposed_generation_cost)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run (self, scalers = {'capital costs':1.0}):\n\n self.was_run = True\n self.reason = \"OK\"\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'wind':\n self.was_run = False\n self.reason = \"Not a Wind project\"\n return\n\n ...
[ "0.69873667", "0.64584976", "0.6378536", "0.63241583", "0.63009036", "0.62177217", "0.6204553", "0.6156644", "0.6136159", "0.6083111", "0.6052154", "0.6037782", "0.60245705", "0.60139", "0.5996558", "0.599415", "0.5984706", "0.59628487", "0.59623754", "0.59618145", "0.5958885...
0.68490684
1
Calculate the Average Diesel load of the current system Attributes
def calc_average_load (self): #~ self.generation = self.forecast.generation_by_type['generation diesel']\ #~ [self.start_year] self.average_load = \ self.forecast.yearly_average_diesel_load.ix[self.start_year]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\...
[ "0.7328244", "0.64413404", "0.6380107", "0.63506234", "0.6312014", "0.6299931", "0.618164", "0.6149521", "0.6113699", "0.608994", "0.6078722", "0.59945375", "0.5977917", "0.5886562", "0.5843336", "0.5759978", "0.5727464", "0.565973", "0.5629968", "0.56079954", "0.5549923", ...
0.7217842
1
Get values from the community being connected to (second community)
def get_intertie_values (self): #~ print self.new_intertie_data.get_item('community','model as intertie') if self.new_intertie_data is None: raise ValueError, "No community to intertie to" self.connect_to_intertie = \ self.new_intertie_data.get_item('community','model as intertie') self.intertie_generation_efficiency = \ self.new_intertie_data.get_item( 'community', 'diesel generation efficiency' ) it_diesel_prices = self.new_intertie_data.get_item( 'community', 'diesel prices' ) it_diesel_prices.index = it_diesel_prices.index.astype(int) #~ print it_diesel_prices.ix[self.start_year:self.end_year] self.intertie_diesel_prices = \ it_diesel_prices.ix[self.start_year:self.end_year].values.T[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_communities(self):\n return self.__communities", "def get_communities(self):\n return self._communities.values()", "def get_communalities(self):\n df_communalities = pd.DataFrame(self.fa.get_communalities()).set_index(self.df.columns)\n if self.verbose:\n print(f'Com...
[ "0.69425935", "0.636551", "0.6130402", "0.6092634", "0.60580534", "0.5994856", "0.58466315", "0.57885724", "0.5787836", "0.5702516", "0.5695605", "0.56887186", "0.5679699", "0.5653863", "0.56507945", "0.5644417", "0.5626879", "0.560844", "0.5607573", "0.55997175", "0.55808896...
0.0
-1
Calculate the generation offset by connecting a transmission line to the community to connect to. Attributes
def calc_intertie_offset_generation (self): self.generation = \ self.forecast.get_generation(self.start_year,self.end_year) dist = self.comp_specs['distance to community'] self.annual_transmission_loss = \ 1 - ( (1- (self.comp_specs['transmission loss per mile']/ 100.0)) ** dist) self.intertie_offset_generation = \ self.generation * (1 + self.annual_transmission_loss) gen_eff = self.intertie_generation_efficiency self.intertie_offset_generation_fuel_used = \ self.intertie_offset_generation / gen_eff #~ print 'self.proposed_generation',self.proposed_generation #~ print con
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_and_set_propagation_distances(self):\n\n self.l_edge = self.calculate_distance_edge()\n self.l_int = self.calculate_distance_interaction()", "def calculate_module_offsets(self):\n \n # These aren't for instantiating, but we use them to get the dimensions\n self.po...
[ "0.5927528", "0.57913303", "0.5498828", "0.54520005", "0.5265909", "0.52382195", "0.520508", "0.5108444", "0.50968987", "0.5064629", "0.5039651", "0.5012671", "0.4999943", "0.4968546", "0.4968546", "0.49591216", "0.4952447", "0.49499637", "0.49423927", "0.49011195", "0.489852...
0.62144107
0
Calculate the status quo generation in the community . Attributes
def calc_pre_intertie_generation (self): self.pre_intertie_generation = \ self.forecast.get_generation(self.start_year,self.end_year) gen_eff = self.cd["diesel generation efficiency"] self.pre_intertie_generation_fuel_used = \ self.pre_intertie_generation / gen_eff #~ print 'self.baseline_generatio',self.baseline_generation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_shield(self,obs):", "def advancedStats():", "def calc_stat_values(self):", "def setMyStatus(self):\n self.clearMyStatus()\n for id, myComponent in self.components.iteritems():\n self.currentComps += 1\n if myComponent.type != '':\n compData = s...
[ "0.5788508", "0.5613959", "0.55464876", "0.55033845", "0.546878", "0.54424125", "0.53677684", "0.5304037", "0.5268452", "0.52517754", "0.5244365", "0.52423894", "0.52423894", "0.52004385", "0.5194574", "0.51699513", "0.5161064", "0.51408136", "0.5137545", "0.51323235", "0.512...
0.0
-1
Calculate the heat recovery
def calc_lost_heat_recovery (self): if not self.cd['heat recovery operational']: self.lost_heat_recovery = [0] else: gen_eff = self.cd["diesel generation efficiency"] self.lost_heat_recovery = \ (self.generation / gen_eff )* .10
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_loss_heat_recovery (self):\n hr_used = self.cd['heat recovery operational']\n self.loss_heat_recovery = 0\n if hr_used:# == 'Yes':\n self.loss_heat_recovery = self.electric_diesel_reduction * \\\n (self.comp_specs['percent heat recovered'] / 100.0)\n #...
[ "0.72527164", "0.6295159", "0.6292347", "0.62802047", "0.6267784", "0.60749567", "0.59754294", "0.59540564", "0.58983105", "0.5896097", "0.589459", "0.5840689", "0.56996167", "0.56794786", "0.5667763", "0.5648994", "0.56376565", "0.56210124", "0.5620937", "0.55695546", "0.554...
0.75004613
0
Calculate the capital costs. Attributes
def calc_capital_costs (self): road_needed = 'road needed' if self.cd['on road system']: road_needed = 'road not needed' dist = self.comp_specs['distance to community'] self.capital_costs = self.comp_specs['est. intertie cost per mile']\ [road_needed] * dist #~ print self.capital_costs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_capital_costs (self):\n self.capital_costs = self.max_boiler_output * \\\n self.comp_specs[\"cost per btu/hrs\"]\n #~ print self.capital_costs", "def calc_capital_costs (self):\n powerhouse_control_cost = 0\n if not self.cd['switchgear suitable ...
[ "0.7806206", "0.74040896", "0.6545671", "0.64966065", "0.64836544", "0.6464849", "0.63642645", "0.6326284", "0.63027006", "0.62339413", "0.62284034", "0.6213556", "0.6188633", "0.61863375", "0.6160501", "0.61119217", "0.60997415", "0.60323167", "0.599988", "0.5985151", "0.595...
0.7694785
1
Calculate annual electric savings created by the project. Attributes
def calc_annual_electric_savings (self): costs = self.comp_specs['diesel generator o&m'] for kW in costs.keys(): try: if self.average_load < int(kW): maintenance = self.comp_specs['diesel generator o&m'][kW] break except ValueError: maintenance = self.comp_specs['diesel generator o&m'][kW] self.baseline_generation_cost = maintenance + \ (self.pre_intertie_generation_fuel_used * self.diesel_prices) maintenance = self.capital_costs * \ (self.comp_specs['percent o&m'] / 100.0) self.proposed_generation_cost = maintenance + \ self.intertie_offset_generation_fuel_used * \ self.intertie_diesel_prices self.annual_electric_savings = self.baseline_generation_cost -\ self.proposed_generation_cost #~ print len(self.annual_electric_savings) #~ print 'self.annual_electric_savings',self.annual_electric_savings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_genera...
[ "0.7408717", "0.6447516", "0.61507213", "0.56581444", "0.56386214", "0.56319666", "0.56268173", "0.5622094", "0.55830747", "0.55610895", "0.55515134", "0.5541858", "0.5536161", "0.5518753", "0.54845977", "0.5477718", "0.54517406", "0.54387516", "0.54357356", "0.54199076", "0....
0.70783615
1
Calculate annual heating savings created by the project. Attributes
def calc_annual_heating_savings (self): price = self.diesel_prices + self.cd['heating fuel premium'] maintenance = self.comp_specs['heat recovery o&m'] self.annual_heating_savings = -1 * \ (maintenance + (self.lost_heat_recovery * price))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_...
[ "0.7293272", "0.65816814", "0.64126164", "0.5913036", "0.59082484", "0.55546623", "0.55409116", "0.5488795", "0.54877865", "0.5483977", "0.54786175", "0.5446557", "0.54036194", "0.5401703", "0.53986675", "0.53903943", "0.53836673", "0.538216", "0.537716", "0.5362062", "0.5353...
0.69910264
1
Get total fuel saved. Returns float the total fuel saved in gallons
def get_fuel_total_saved (self): #~ print self.lost_heat_recovery #~ print self.intertie_offset_generation_fuel_used #~ print self.pre_intertie_generation_fuel_used #~ gen_eff = self.cd["diesel generation efficiency"] #~ fuel_used = self.intertie_offset_generation / gen_eff generation_diesel_reduction = \ np.array(self.pre_intertie_generation_fuel_used\ [:self.actual_project_life]) return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\ generation_diesel_reduction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def get_total_energy(parameters):\n return orm.Float(parameters.get_attribute('energy'))", "def total_energy(self):\n return self._total_energy", "def totalValue(self):\n\n\t\tvalue = 0\n...
[ "0.824919", "0.67309505", "0.67212445", "0.6641931", "0.6640454", "0.65572923", "0.64542156", "0.6337531", "0.6322669", "0.6316941", "0.6301916", "0.6281241", "0.62791896", "0.6275253", "0.6265616", "0.61895245", "0.6137895", "0.61320716", "0.61031145", "0.6087333", "0.607770...
0.7722197
1
Get total energy produced. Returns float the total energy produced
def get_total_energy_produced (self): return self.pre_intertie_generation[:self.actual_project_life]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def total_energy(self):\n return self._total_energy", "def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))", "def get_total_energy_produced (self):\n return self.net_generation_wind",...
[ "0.8479409", "0.79218054", "0.779159", "0.7745281", "0.7656375", "0.7550641", "0.7512186", "0.75108033", "0.75108033", "0.75029075", "0.7464623", "0.73755133", "0.7374632", "0.7320508", "0.72703177", "0.72320104", "0.7221515", "0.7218909", "0.71979094", "0.71671396", "0.71279...
0.77342874
4
Get your current running jobs on the Sherlock cluster
def running_jobs_sherlock(): user = os.environ['USER'] return subprocess.check_output(['squeue', '-u',user,'-o','%Z']).split()[1:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs", "async def get_...
[ "0.7408034", "0.72631824", "0.7173004", "0.6856391", "0.6788189", "0.6787631", "0.6698274", "0.6687841", "0.6678214", "0.6678214", "0.66645783", "0.6657631", "0.66119254", "0.65800935", "0.6543887", "0.65284514", "0.64695036", "0.6460383", "0.64549667", "0.64276236", "0.63949...
0.76263565
0
Make directories even if they already exist
def safeMkDir(pth ,verbose ) : try: os.mkdir(pth) except OSError: if verbose: print('directory %s already exists ?!'%pth)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_directories(self):\n os.makedirs(self.data_dir, exist_ok=True)\n os.makedirs(self.patches_dir, exist_ok=True)\n os.makedirs(self.raw_image_dir, exist_ok=True)\n os.makedirs(self.pro_image_dir, exist_ok=True)\n os.makedirs(self.results_dir, exist_ok=True)", "def make_fo...
[ "0.76764554", "0.76725763", "0.7648681", "0.7634497", "0.7628304", "0.76242155", "0.75590366", "0.75419897", "0.75321114", "0.75267327", "0.75118554", "0.74595684", "0.7454827", "0.73759156", "0.7366004", "0.7350717", "0.7350717", "0.73391104", "0.73208994", "0.7290557", "0.7...
0.0
-1
Makes a nested directory, even if intermediate links do not yet exist
def safeMkDirForce(pth) : components = pth.split('/') curr_dir = [components[0]] for c in components[1:]: curr_dir.append(c) safeMkDir('/'+os.path.join(*curr_dir),verbose=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_directory_structure(path_main):\n\n if not path_main.exists():\n path_main.mkdir(parents=True)", "def makedirs(self, parent):\n if not parent.exists():\n logging.msg('Creating directory structure for \"%s\"' % (\n parent.path,), verbosity=2)\n pare...
[ "0.6948033", "0.6809353", "0.68089527", "0.6735363", "0.6733862", "0.67253435", "0.6716056", "0.67078507", "0.6676089", "0.6670901", "0.6664893", "0.66282934", "0.66131335", "0.6590413", "0.65780735", "0.6577037", "0.6561761", "0.6561761", "0.65132505", "0.6512065", "0.651137...
0.6692424
8
Flattens a list of Lists to a list
def flatten(lol ): return [item for sublist in lol for item in sublist]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flat_list_of_lists(l):\n return [item for sublist in l for item in sublist]", "def flattened(list_of_lists):\n res = functools.reduce(operator.iconcat, list_of_lists, [])\n return res", "def flatten(list_of_lists: List[List]) -> List:\n return reduce(iconcat, list_of_lists, [])", "def fla...
[ "0.85435647", "0.85404813", "0.852175", "0.85197717", "0.848276", "0.84166855", "0.839817", "0.8382662", "0.83767587", "0.83767587", "0.83384365", "0.8302829", "0.82918155", "0.82290846", "0.82144505", "0.82091016", "0.81558263", "0.81378996", "0.8100652", "0.8090682", "0.806...
0.7730698
34
Merge dictionaries, presumes no overlap in keys
def merge_dicts(listDicts) : return dict(itertools.chain.from_iterable([x.items() for x in listDicts]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dict_merge(dict1, dict2):\n combined = dict(dict1)\n combined.update(dict2)\n return combined", "def dict_merge(dict1, dict2):\n combined = dict(dict1)\n combined.update(dict2)\n return combined", "def merge(dict1: dict, dict2: dict):\n if dict1 is None or dict2 is None:\n retur...
[ "0.76170945", "0.76170945", "0.7527569", "0.749968", "0.7440527", "0.7404144", "0.73930734", "0.73853135", "0.73769873", "0.73720855", "0.7343238", "0.72534424", "0.7251481", "0.72268325", "0.7226796", "0.72029424", "0.7192793", "0.71703434", "0.7158246", "0.71368694", "0.712...
0.6827089
59
Run DFS from some (starting or intermediate) State until termination
def run_dfs(self,s): if self.verbose: print('entering run_dfs with s = ',s) new_states = [self.succ(s,a) for a in self.actions(s)] results = [] for ns in new_states: if self.verbose: print('considering new state = ',ns) end = self.is_end(ns) if end: result = self.result(ns) if result is not None: results.append(result) else: results += self.run_dfs(ns) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DFS(initial_state, check_dict): \r\n \r\n print(\"Implementing DFS...\")\r\n q = deque()\r\n q.append(initial_state)\r\n accomplished = False\r\n \r\n while len(q) != 0:\r\n path = q.pop()\r\n \r\n if is_goal(path[-1][0]):\r\n goal = path\r\n a...
[ "0.71973026", "0.69657785", "0.69124275", "0.6738573", "0.6701349", "0.6647733", "0.66013175", "0.6577175", "0.6573915", "0.6554322", "0.65495586", "0.6530897", "0.6520473", "0.6477678", "0.64633286", "0.64230543", "0.6422694", "0.6400245", "0.6394926", "0.6365001", "0.635706...
0.695327
2
If a datagram is available, get it and return it, otherwise return None.
def get_net_message(): # TODO: refactor to use a list of events encoded using masgpack? try: message, address = serverSocket.recvfrom(1024) except: return None, None message = message.decode('utf-8') return message, address
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_device_or_None(id):\n try:\n d = Device.objects.get(id=id)\n return d\n except Device.DoesNotExist:\n return None", "def try_get(self, device_id):\n with self.lock:\n return self.devices.get(device_id, None)", "def fetch_packet_from_analyzer(self):\n\n ...
[ "0.5770412", "0.5708058", "0.5640891", "0.5573784", "0.5538548", "0.5535006", "0.5515138", "0.54124844", "0.5348417", "0.5336429", "0.532234", "0.5316779", "0.5302935", "0.5275738", "0.5237896", "0.52374405", "0.5197468", "0.5190849", "0.5188156", "0.51715595", "0.51383466", ...
0.0
-1
simply sends a message to the client address specified.
def send_net_message_client(message, client_addr): serverSocket.sendto(message, client_addr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def msg_client(msg, client):\r\n client.send(bytes(str(msg), \"utf-8\"))", "def sendToClient(self, client_id, message_type, message):\n if not client_id in self.client_to_socket:\n raise ValueError(\"The client with id {} does not exist\".format(client_id))\n self.sendToSocket(self.cl...
[ "0.7387198", "0.6993373", "0.6930433", "0.69239", "0.6904713", "0.68367815", "0.6811525", "0.67738485", "0.6729188", "0.6701323", "0.6656938", "0.6642157", "0.66237146", "0.6605746", "0.6597053", "0.6596277", "0.6579792", "0.65749264", "0.6560786", "0.6554997", "0.65531754", ...
0.82171863
0
process incoming messages from clients.
def process_net_message(message, address): if message[0] == '<' and message[-1] == '>': message = message[1:-1] if ":" in message: command, data = message.split(":") else: command = message data = None if command == "JOIN": print("added player to player list:", data, address) ip_address, port = address active_player_dict[str(address)] = Player(ip_address, port, data, random.randint(0, 639), random.randint(0, 479)) elif command == "QUIT": print("player removed from player list:", address) del active_player_dict[str(address)] elif command == "KD": data = chr(int(data)) if data not in active_player_dict[str(address)].keys_down: active_player_dict[str(address)].keys_down.append(data) elif command == "KU": data = chr(int(data)) if data in active_player_dict[str(address)].keys_down: active_player_dict[str(address)].keys_down.remove(data) elif command == "keepAlive": data = int(data) if active_player_dict[str(address)].alive > 0: #time for player to be alive is not zero active_player_dict[str(address)].alive = data currentTime = time.time() else: print("invalid message.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_clients():\n for client in state.CLIENT_LIST:\n if client.active and client.cmd_ready:\n logging.debug(\"Found a message, processing...\")\n msg_processor(client)", "async def _process_messages(self) -> None:\n try:\n while not self._client.closed:\n ...
[ "0.7975024", "0.74111116", "0.74002844", "0.7238532", "0.72266316", "0.7161251", "0.70975065", "0.7079745", "0.69220155", "0.68791217", "0.68509686", "0.6820404", "0.6802297", "0.6784278", "0.67002666", "0.66915977", "0.668222", "0.6679292", "0.66399705", "0.6637506", "0.6635...
0.0
-1
Encontra o ponto G1, a partir dos elementos alfa (a) e beta (b), que expressa o grau de confiabilidade do conjunto de classes.
def assurance(a, b): return a - b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_vb_class(self, a_feats, a_toks1, a_toks2):\n # find intersecting verb classes\n vb_classes = Counter()\n vb_cls1 = vb_cls2 = None\n for w1, p1 in a_toks1:\n if w1 not in LCSI or p1 not in VB_TAGS:\n continue\n vb_cls1 = LCSI[w1]\n ...
[ "0.6031749", "0.5755874", "0.5419031", "0.53809613", "0.5378334", "0.5378009", "0.53204846", "0.52991605", "0.52701527", "0.52547026", "0.52520907", "0.523579", "0.51886666", "0.5179942", "0.51621455", "0.5144527", "0.5130142", "0.5116478", "0.51110935", "0.51048064", "0.5101...
0.0
-1
EscrowTransactionResponse a model defined in Swagger
def __init__(self, id=None, payee_wallet_id=None, payer_wallet_id=None, amount=None, withdrawn=None, escrow_address=None, record_status=None, create_date=None, update_date=None): # noqa: E501 # noqa: E501 self._id = None self._payee_wallet_id = None self._payer_wallet_id = None self._amount = None self._withdrawn = None self._escrow_address = None self._record_status = None self._create_date = None self._update_date = None self.discriminator = None if id is not None: self.id = id if payee_wallet_id is not None: self.payee_wallet_id = payee_wallet_id if payer_wallet_id is not None: self.payer_wallet_id = payer_wallet_id if amount is not None: self.amount = amount if withdrawn is not None: self.withdrawn = withdrawn if escrow_address is not None: self.escrow_address = escrow_address if record_status is not None: self.record_status = record_status if create_date is not None: self.create_date = create_date if update_date is not None: self.update_date = update_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_to_rest_resource(self, model, verbose=False):\n return Resource(model, TRANSACTION_FIELDS).to_dict(verbose)", "def _create_response_model(self, data):\n pass", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = geta...
[ "0.6470275", "0.57408893", "0.56040984", "0.5564933", "0.55613655", "0.554139", "0.55230355", "0.5476537", "0.5448558", "0.5446895", "0.5433953", "0.5374998", "0.5326653", "0.5325249", "0.5271114", "0.5253841", "0.52191997", "0.5192388", "0.5190319", "0.5184639", "0.51791036"...
0.0
-1
Sets the id of this EscrowTransactionResponse.
def id(self, id): self._id = id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_id(self, id):\n self.data['id'] = id", "def set_id(self, id):\n self.__id = id", "def SetId(self, id):\n self.id = int(id)", "def set_id(self, id_):\n\n self.id_ = id_", "def set_id(self, id):\n\n\t\tif id is not None and not isinstance(id, int):\n\t\t\traise SDKExceptio...
[ "0.7112411", "0.68683225", "0.6860865", "0.6837625", "0.6821203", "0.67498773", "0.6711314", "0.6711314", "0.6711314", "0.6711314", "0.6711314", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837" ]
0.6603375
92
Sets the payee_wallet_id of this EscrowTransactionResponse.
def payee_wallet_id(self, payee_wallet_id): self._payee_wallet_id = payee_wallet_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def payer_wallet_id(self, payer_wallet_id):\n\n self._payer_wallet_id = payer_wallet_id", "def payor_id(self, payor_id):\n\n self._payor_id = payor_id", "def payee_zip(self, payee_zip):\n\n self._payee_zip = payee_zip", "def set_merchant_transaction_id(self, transaction_id):\n sel...
[ "0.7311907", "0.573136", "0.54004514", "0.53062975", "0.53006345", "0.5269258", "0.5153421", "0.5153421", "0.4994353", "0.4974469", "0.49704325", "0.49286303", "0.49132255", "0.4910477", "0.4855245", "0.48479044", "0.48479044", "0.48479044", "0.48202246", "0.47650665", "0.474...
0.81508285
0
Sets the payer_wallet_id of this EscrowTransactionResponse.
def payer_wallet_id(self, payer_wallet_id): self._payer_wallet_id = payer_wallet_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def payee_wallet_id(self, payee_wallet_id):\n\n self._payee_wallet_id = payee_wallet_id", "def payor_id(self, payor_id):\n\n self._payor_id = payor_id", "def set_merchant_transaction_id(self, transaction_id):\n self.merchant_transaction_id = transaction_id", "def merchant_id(self, mercha...
[ "0.7696529", "0.60767055", "0.56332916", "0.56299317", "0.5272401", "0.5272401", "0.5213051", "0.5161533", "0.51283133", "0.5097511", "0.4929194", "0.48645753", "0.48536748", "0.48232916", "0.4775775", "0.47211295", "0.47165722", "0.4694163", "0.46884617", "0.4682201", "0.465...
0.84063405
0
Sets the amount of this EscrowTransactionResponse.
def amount(self, amount): self._amount = amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_amount(self, amount):\n self.amount = amount", "def transaction_amount(self, transaction_amount):\n\n self._transaction_amount = transaction_amount", "def amount(self, amount):\n if amount is None:\n raise ValueError(\"Invalid value for `amount`, must not be `None`\") #...
[ "0.7653338", "0.7072966", "0.6543301", "0.6543301", "0.64166534", "0.6306864", "0.6261621", "0.6219271", "0.609621", "0.6081896", "0.60315126", "0.6025742", "0.60226005", "0.60226005", "0.6014517", "0.5932473", "0.5896679", "0.5890197", "0.5884229", "0.58601516", "0.58400244"...
0.76107574
9
Sets the withdrawn of this EscrowTransactionResponse.
def withdrawn(self, withdrawn): self._withdrawn = withdrawn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. Cannot withdraw more than amount in account balance: {}'...
[ "0.5891516", "0.5690135", "0.5676766", "0.5597701", "0.55876815", "0.55855316", "0.55006677", "0.5495458", "0.5472348", "0.54654574", "0.5433065", "0.53317225", "0.5330039", "0.5313943", "0.5277022", "0.52171", "0.51859295", "0.51577157", "0.5134747", "0.5110238", "0.5054822"...
0.78851956
0
Sets the escrow_address of this EscrowTransactionResponse.
def escrow_address(self, escrow_address): self._escrow_address = escrow_address
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_address(self, address):\n pass", "def address(self, address: object):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address...
[ "0.60267246", "0.5820161", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.56964433", "0.5556428", "0.55402744", "0.55071515", "0.5419375", "0.5411141", "0.54009694", "0.538831", "0.5359071", "0.53501517", "0....
0.83312446
0
Sets the record_status of this EscrowTransactionResponse.
def record_status(self, record_status): self._record_status = record_status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ...
[ "0.5884911", "0.5822956", "0.5775433", "0.56188405", "0.56188405", "0.56188405", "0.561294", "0.55961376", "0.5581018", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", ...
0.783832
0
Sets the create_date of this EscrowTransactionResponse.
def create_date(self, create_date): self._create_date = create_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def creation_date(self, creation_date):\n\n self._creation_date = creation_date", "def creation_date(self, creation_date):\n\n self._creation_date = creation_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n ...
[ "0.68681526", "0.68681526", "0.68388635", "0.68388635", "0.68388635", "0.68388635", "0.68388635", "0.6816522", "0.6814412", "0.6662243", "0.66395116", "0.6612482", "0.6612482", "0.6612482", "0.65789974", "0.65789974", "0.6504573", "0.64964783", "0.64300257", "0.61280626", "0....
0.80460167
2
Sets the update_date of this EscrowTransactionResponse.
def update_date(self, update_date): self._update_date = update_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n if upd...
[ "0.72074246", "0.72074246", "0.7175995", "0.6495363", "0.62919956", "0.6009002", "0.57752734", "0.5506892", "0.5506892", "0.5489573", "0.5442477", "0.54176337", "0.54176337", "0.54176337", "0.5408546", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.539665...
0.7807129
1
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(EscrowTransactionResponse, dict): for key, value in self.items(): result[key] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n f...
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.6900...
0.0
-1
Returns the string representation of the model
def to_str(self): return pprint.pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n ...
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442...
0.0
-1
For `print` and `pprint`
def __repr__(self): return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n ...
[ "0.75577617", "0.73375154", "0.6986672", "0.698475", "0.6944995", "0.692333", "0.6899106", "0.6898902", "0.68146646", "0.6806209", "0.6753795", "0.67497987", "0.6744008", "0.6700308", "0.6691256", "0.6674591", "0.6658083", "0.66091245", "0.6606931", "0.6601862", "0.6563738", ...
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): if not isinstance(other, EscrowTransactionResponse): return False return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if i...
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", ...
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n ...
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Helper to log the failed SQS records metric
def _log_failed(cls, count): MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_FAILED_RECORDS, count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_failures(self):\n for exception in self.queue_manager.failure_descriptions():\n self.logger.info(exception)", "def test_failed_deliveries_logging(self):\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\...
[ "0.65698266", "0.6314558", "0.6210403", "0.6180184", "0.61691684", "0.6075002", "0.6070457", "0.6059748", "0.6040478", "0.598686", "0.5963177", "0.5926306", "0.5920489", "0.5919631", "0.58492374", "0.5753404", "0.5748351", "0.5638719", "0.5619237", "0.56000847", "0.55767053",...
0.8446626
0
Segment the records into batches that conform to SQS restrictions This will log any single record that is too large to send, and skip it.
def _message_batches(cls, records): # Dump the records to a list of minimal json records_json = [ json.dumps(record, separators=(',', ':')) for record in records ] current_batch_size = 0 current_batch = [] for record in records_json: line_len = len(record) # Check if the max size of the batch has been reached or if the current # record will exceed the max batch size and start a new batch if ((len(current_batch) == cls.MAX_BATCH_COUNT) or (current_batch_size + line_len > cls.MAX_BATCH_SIZE)): yield current_batch[:] current_batch_size = 0 del current_batch[:] if line_len > cls.MAX_BATCH_SIZE: LOGGER.error('Record too large (%d) to send to SQS:\n%s', line_len, record) cls._log_failed(1) continue # Add the record to the batch current_batch_size += line_len current_batch.append(record) # yield the result of the last batch (no need to copy via slicing) if current_batch: yield current_batch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log_process(self, log_req):\n rq_size = log_req.multipart_size\n with self._lock:\n if self._payload_size + rq_size >= self.max_payload_size:\n if len(self._batch) > 0:\n self._send_batch()\n self._batch.append(log_req)\n self._p...
[ "0.61672634", "0.61136776", "0.55916774", "0.55586743", "0.55274945", "0.5349872", "0.5317542", "0.5314152", "0.51939815", "0.519289", "0.5187157", "0.51492345", "0.5112248", "0.50969017", "0.5063732", "0.5059225", "0.5043429", "0.5042894", "0.5032821", "0.49991766", "0.49933...
0.62689286
0
Perform any final operations for this response, such as metric logging, etc
def _finalize(self, response, batch): if not response: return # Could happen in the case of backoff failing enitrely # Check for failures that occurred in PutRecordBatch after several backoff attempts # And log the actual record from the batch failed = self._check_failures(response, batch=batch) # Remove the failed messages in this batch for an accurate metric successful_records = len(batch) - failed MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_RECORDS_SENT, successful_records) LOGGER.info( 'Successfully sent %d message(s) to queue %s', successful_records, self.queue.url )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main_response(self, data):", "def main_response(self, data):", "def finalize_response(self, request, response, *args, **kwargs):\n if response.status_code == HTTP_201_CREATED:\n diagnosis = Diagnosis.objects.get(pk=response.data.get('id'))\n serializer = calculate_percentage(di...
[ "0.63405913", "0.63405913", "0.6215644", "0.61835784", "0.61431", "0.61020917", "0.6028117", "0.6016297", "0.5995891", "0.5948878", "0.5946144", "0.59437275", "0.59169286", "0.5887103", "0.5878732", "0.58776665", "0.58697927", "0.5867941", "0.5867941", "0.5843703", "0.5823026...
0.6289759
2
Inspect the response and remove any records records that have successfully to sent For each record, the index of the response element is the same as the index used in the request array.
def _strip_successful_records(cls, messages, response): success_ids = { item['Id'] for item in response['Successful'] } LOGGER.info('Removing sucessful message indices from batch: %s', success_ids) for success_id in success_ids: # Get the successful message by ID and remove it message = cls._extract_message_by_id(messages, success_id) if not message: continue messages.remove(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_none_response(self):\n\n print(\"# Rows before non response are removed: {} \".format(len(self.data)))\n self.data = self.data[self.data['names'].map(lambda d: len(d) > 0)]\n print(\"# Rows after non response are removed: {} \".format(len(self.data)))", "def _finalize(self, respons...
[ "0.6261296", "0.5926714", "0.5847716", "0.58347917", "0.5816505", "0.568727", "0.5628807", "0.55821556", "0.55515593", "0.5497828", "0.5445062", "0.5384468", "0.5301025", "0.52775407", "0.5272712", "0.52238524", "0.5213868", "0.51214164", "0.51052916", "0.50793844", "0.507387...
0.7417818
0
Inspect the response to see if the failure was our fault (the Sender)
def _check_failures(self, response, batch=None): if not response.get('Failed'): return 0 # nothing to do here LOGGER.error('The following records failed to put to queue %s', self.queue.url) for failure in response['Failed']: # Pull out the record that matches this ID record = self._extract_message_by_id(batch, failure['Id']) if batch else None LOGGER.error(self._format_failure_message(failure, record=record)) failed = len(response.get('Failed', [])) self._log_failed(failed) # Raise an exception if this is the fault of the sender (us) if any(result['SenderFault'] for result in response['Failed']): raise SQSClientError('Failed to send records to SQS:\n{}'.format(response)) return failed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testFailure(self):\n request = b'hello'\n reply = self.sendAndReceive(request)\n self.assertEqual(2, reply[0])", "def check_response_errors(self, resp):\n return True", "def check_response(response):\n status = response.get('status')\n ret = status and status == 'OK'\n ...
[ "0.6853921", "0.68304646", "0.67763245", "0.6721088", "0.6715591", "0.67042756", "0.66370153", "0.6635388", "0.6612699", "0.6506884", "0.64258915", "0.63320297", "0.633047", "0.62748057", "0.6268025", "0.6258739", "0.6243886", "0.6212098", "0.6211683", "0.61967474", "0.616833...
0.6120679
23
Send new formatted messages to CSIRT SQS
def _send_messages(self, batched_messages): @backoff.on_predicate(backoff.fibo, lambda resp: len(resp.get('Failed', [])) > 0, max_tries=self.MAX_BACKOFF_ATTEMPTS, max_value=self.MAX_BACKOFF_FIBO_VALUE, on_backoff=backoff_handler(debug_only=False), on_success=success_handler(), on_giveup=giveup_handler()) @backoff.on_exception(backoff.expo, self.EXCEPTIONS_TO_BACKOFF, max_tries=self.MAX_BACKOFF_ATTEMPTS, on_backoff=backoff_handler(debug_only=False), on_success=success_handler(), on_giveup=giveup_handler()) def _send_messages_helper(entries): """Inner helper function for sending messages with backoff_handler Args: entries (list<dict>): List of SQS SendMessageBatchRequestEntry items """ LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url) response = self.queue.send_messages(Entries=entries) if response.get('Successful'): LOGGER.info( 'Successfully sent %d message(s) to %s with MessageIds %s', len(response['Successful']), self.queue.url, ', '.join( '\'{}\''.format(resp['MessageId']) for resp in response['Successful'] ) ) if response.get('Failed'): self._check_failures(response) # Raise an exception if this is our fault self._strip_successful_records(entries, response) return response message_entries = [ { 'Id': str(idx), 'MessageBody': message } for idx, message in enumerate(batched_messages) ] # The try/except here is to catch any raised errors at the end of the backoff try: return _send_messages_helper(message_entries) except self.EXCEPTIONS_TO_BACKOFF: LOGGER.exception('SQS request failed') # Use the current length of the message_entries in case some records were # successful but others were not self._log_failed(len(message_entries)) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_sqs_message(imageinfo):\r\n try:\r\n message = json.dumps(imageinfo)\r\n sqsclient = boto3.client('sqs')\r\n sqsclient.send_message(\r\n QueueUrl=os.environ['QueueURL'],\r\n MessageBody=message\r\n )\r\n\r\n except ClientError as err:\r\n ...
[ "0.66801953", "0.6378957", "0.6152751", "0.60166675", "0.5753532", "0.5741753", "0.5703277", "0.56923753", "0.5691338", "0.5684069", "0.5646882", "0.5642848", "0.56269246", "0.56122345", "0.55771816", "0.5573004", "0.55638814", "0.55422395", "0.5541555", "0.55407935", "0.5535...
0.0
-1
Inner helper function for sending messages with backoff_handler
def _send_messages_helper(entries): LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url) response = self.queue.send_messages(Entries=entries) if response.get('Successful'): LOGGER.info( 'Successfully sent %d message(s) to %s with MessageIds %s', len(response['Successful']), self.queue.url, ', '.join( '\'{}\''.format(resp['MessageId']) for resp in response['Successful'] ) ) if response.get('Failed'): self._check_failures(response) # Raise an exception if this is our fault self._strip_successful_records(entries, response) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _send_messages(self, batched_messages):\n @backoff.on_predicate(backoff.fibo,\n lambda resp: len(resp.get('Failed', [])) > 0,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n max_value=self.MAX_BACKOFF_FIBO_VALUE,\n ...
[ "0.74842525", "0.69976413", "0.61308867", "0.6106848", "0.6101564", "0.60256076", "0.57225543", "0.56751674", "0.55376226", "0.5528896", "0.5524516", "0.55091655", "0.55081546", "0.5478417", "0.54683506", "0.54618955", "0.5446962", "0.541031", "0.53984827", "0.53874975", "0.5...
0.5390745
19