query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Send the commmit notification to CIA. The message is created incrementally using lxml's "E" builder.
def notify(self, builder): # Build the <files> section for the template... commit = builder.commit files = E.files() commit_msg = commit.message.strip() commit_msg = re.sub(r'[\x00-\x09\x0B-\x1f\x7f-\xff]', '', commit_msg) for filename in commit.files_changed: safe_filename = re.sub(r'[\x00-\x09\x0B-\x1f\x7f-\xff]', '', filename) file_element = E.file(safe_filename) files.append(file_element) # Build the message cia_message = self.MESSAGE() cia_message.append(self._generator) source = self.SOURCE(E.project("KDE")) source.append(E.module(self.repository.path)) source.append(E.branch(self.repository.ref_name)) cia_message.append(source) cia_message.append(self.TIMESTAMP(commit.date)) body = self.BODY() commit_data = self.COMMIT() commit_data.append(E.author(commit.author_name)) commit_data.append(E.revision(commit.description)) commit_data.append(files) commit_data.append(E.log(commit_msg)) commit_data.append(E.url(commit.url)) body.append(commit_data) cia_message.append(body) # Convert to a string commit_xml = etree.tostring(cia_message) # Craft the email.... message = MIMEText( commit_xml, 'xml', 'utf-8' ) message['Subject'] = "DeliverXML" message['From'] = "sysadmin@kde.org" message['To'] = "commits@platna.kde.org" # Send email... self.smtp.sendmail("sysadmin@kde.org", ["commits@platna.kde.org"], message.as_string())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(guid, message):", "def notify(self, id, command, data = None):\n print \"sending:\", id, command, data\n if command == Code.START: data = [id]\n try:\n msg = Message(command = command, data = data)\n self.contacts[id].send(msg.encode())\n except:\n ...
[ "0.5887094", "0.5750634", "0.57312334", "0.57009876", "0.5486229", "0.54457545", "0.5437605", "0.53316367", "0.5314361", "0.52677506", "0.52383125", "0.5220476", "0.52009946", "0.5194496", "0.5170836", "0.51647365", "0.51592344", "0.51445454", "0.5126826", "0.5122613", "0.511...
0.5796216
1
Check for potential problems in a commit.
def check_commit_problems(self, commit, diff): # Initialise self._license_problem = False self._commit_problem = False self._commit_notes = defaultdict(list) # Unsafe regex checks... unsafe_matches = list() unsafe_matches.append( r"\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\b\s*[\(\r\n]" ) unsafe_matches.append( r"\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\b\s*[\(\r\n]" ) unsafe_matches.append( r"(scanf)\b\s*[\(\r\n]" ) valid_filename_regex = r"\.(cpp|cc|cxx|C|c\+\+|c|l|y||h|H|hh|hxx|hpp|h\+\+|qml)$" # Retrieve the diff and do the problem checks... filename = unicode("") filediff = list() for line in diff: file_change = re.match( "^diff --(cc |git a\/.+ b\/)(.+)$", line ) if file_change: # Are we changing file? If so, we have the full diff, so do a license check.... if filename != "" and commit.files_changed[ filename ]["change"] in ['A'] and re.search(valid_filename_regex, filename): self.check_commit_license(filename, ''.join(filediff)) filediff = list() filename = file_change.group(2) continue # Diff headers are bogus if re.match("@@ -\d+,\d+ \+\d+ @@", line): filediff = list() continue # Do an incremental check for *.desktop syntax errors.... if re.search("\.desktop$", filename) and re.search("[^=]+=.*[ \t]$", line) and line.startswith("+") and not re.match("^\+#", line): self._commit_notes[filename].append( "[TRAILING SPACE] **" ) self._commit_problem = True # Check for things which are unsafe... for safety_match in unsafe_matches: match = re.match(safety_match, line) if match: note = "[POSSIBLY UNSAFE: {0}] **".format( match.group(1) ) self._commit_notes[filename].append(note) self._commit_problem = True # Store the diff.... filediff.append(line) if filename != "" and commit.files_changed[ filename ]["change"] in ['A'] and re.search(valid_filename_regex, filename): self.check_commit_license(filename, ''.join(filediff))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)", "def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n r...
[ "0.7050094", "0.69747615", "0.6810369", "0.6753324", "0.67024326", "0.66566247", "0.6637356", "0.65921104", "0.65602815", "0.65418464", "0.6462754", "0.6436193", "0.6383709", "0.6378951", "0.62852246", "0.6269505", "0.6268365", "0.6259074", "0.6205398", "0.6135695", "0.609353...
0.800173
0
Returns index of the resource to use for making requests to get data if none of the resources are available, then send number of seconds until the resource is not available
def get_resource_index(self): result = -1 max_sleep_time = self.time_window with self._lock: while result == -1: for i in range(0, self.num_keys): curr_sleep_time = max((self.timers[i][0] + self.time_window) - time.time(), 0) max_sleep_time = min(max_sleep_time, curr_sleep_time) if self.timers[i][1] >= self.window_limit and self.timers[i][0] + self.time_window < time.time(): self.timers[i][0] = 0 self.timers[i][1] = 0 if self.timers[i][1] < self.window_limit: result = i break if result == -1: # case when all streams are rate limited # logging.warning('sleeping for %d seconds.' % max_sleep_time) # time.sleep(max_sleep_time) return -1 * max_sleep_time if self.timers[result][0] == 0: self.timers[result][0] = time.time() self.timers[result][1] += 1 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_get():\n for resource in resources:\n\n # acquire lock\n res_lock = resource.get(\"lock\")\n res_lock.acquire()\n\n # Get if available\n if resource.get(\"available\") == \"true\":\n # Available - acquire resource and return\n resource.update({...
[ "0.6093405", "0.5707536", "0.56445384", "0.5552679", "0.55315655", "0.54823583", "0.5422522", "0.5402123", "0.5400669", "0.538704", "0.5379008", "0.5378094", "0.53733474", "0.535927", "0.5357638", "0.53508085", "0.53386307", "0.5338582", "0.5338395", "0.53344756", "0.5322996"...
0.67190135
0
Test Chronos GR Config plugin writes new config when config has changed
def test_chronos_gr_config_changed(self, mock_run_command, mock_safely_write): # Create the plugin plugin = ChronosGRConfigPlugin({}) # Set up the config strings to be tested old_config_string = "Old Chronos GR config" new_config_string = "New Chronos GR config" # Call 'on_config_changed' with file.open mocked out with mock.patch('clearwater_etcd_plugins.chronos.chronos_gr_config_plugin.open', \ mock.mock_open(read_data=old_config_string), create=True) as mock_open: plugin.on_config_changed(new_config_string, None) # Test assertions mock_open.assert_called_once_with(plugin.file(), "r") mock_safely_write.assert_called_once_with(plugin.file(), new_config_string) mock_run_command.assert_called_once_with("/usr/share/clearwater/clearwater-queue-manager/scripts/modify_nodes_in_queue add apply_chronos_gr_config")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"", "def test_update_wait():\n wait = '10 seconds'\n config_info = read_c...
[ "0.72591335", "0.72113305", "0.71514034", "0.7107235", "0.7026826", "0.69808435", "0.69801", "0.6910698", "0.6887558", "0.68464094", "0.6824999", "0.68176496", "0.67969394", "0.67067325", "0.6699056", "0.6610438", "0.6609877", "0.6593223", "0.6569542", "0.6516883", "0.6515605...
0.75435835
0
Returns 3d matrix of sizes [257,301,2]
def process(self, data): spectr = stft(data, n_fft=512, hop_length=160) return np.concatenate((spectr.real[:, :, np.newaxis], spectr.imag[:, :, np.newaxis]), axis=2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_2d_to_3d(matrix: np.matrix) -> np.matrix:\n return np.matrix([\n [matrix.item(0, 0), matrix.item(0, 1), 0, matrix.item(0, 2)],\n [matrix.item(1, 0), matrix.item(1, 1), 0, matrix.item(1, 2)],\n [0, 0, 1, 0],\n [matrix.item(2, 0), matrix.item(...
[ "0.64607185", "0.6292983", "0.6173402", "0.6133898", "0.6117917", "0.6092436", "0.60344905", "0.601531", "0.6014885", "0.60019165", "0.5841986", "0.58119726", "0.5777618", "0.5760934", "0.57527536", "0.57133275", "0.5710595", "0.56664544", "0.56459385", "0.5627127", "0.559783...
0.0
-1
Finds the intersection of two lines given in Hesse normal form. Returns closest integer pixel locations.
def intersection(line1, line2): rho1, theta1 = line1 rho2, theta2 = line2 A = np.array([ [np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)] ]) b = np.array([[rho1], [rho2]]) x0, y0 = np.linalg.solve(A, b) x0, y0 = int(np.round(x0)), int(np.round(y0)) return [x0, y0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getIntersection(line1, line2):\r\n\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n\r\n a = np.array([\r\n [np.cos(theta1), np.sin(theta1)],\r\n [np.cos(theta2), np.sin(theta2)]\r\n ])\r\n\r\n b = np.array([[rho1], [rho2]])\r\n\r\n x, y = np.linalg.solve(a, b)\r\n\r\n...
[ "0.77867657", "0.76443017", "0.75186956", "0.7482537", "0.74320704", "0.72984135", "0.72857475", "0.7276795", "0.7244609", "0.72418344", "0.71921796", "0.71198523", "0.7066881", "0.70585316", "0.70348686", "0.70186913", "0.7002847", "0.6971487", "0.6970397", "0.6936027", "0.6...
0.763136
2
Check correctness of `limiter` value returned.
def test_identify_limit(limit, all, expected): assert identify_limit(limit, all) == expected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n ...
[ "0.6411604", "0.6216477", "0.6159756", "0.59867847", "0.5980849", "0.5841296", "0.5840714", "0.5840501", "0.58299303", "0.5824446", "0.58074856", "0.57916874", "0.577796", "0.5687688", "0.5626272", "0.5561189", "0.5534629", "0.55233294", "0.5515274", "0.55132216", "0.5506859"...
0.52793914
41
Check correctness of iterable returned.
def test_get_row_ids(issues, limiter, expected): assert get_row_ids(issues, limiter) == expected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_good_iterable(obj):\n return _is_iterable(obj) and _has_str_elems(obj)", "def _check_iterable(self):\n if self.theoretical_size is None:\n raise TypeError(\"This `fixture_ref` has not yet been initialized, so it cannot be unpacked/iterated upon. \"\n \"This...
[ "0.7090814", "0.7074417", "0.69252294", "0.6905731", "0.6872017", "0.6810469", "0.68096995", "0.68096995", "0.68021095", "0.66896445", "0.6655056", "0.65693426", "0.65589577", "0.65366095", "0.6519211", "0.65080535", "0.6505808", "0.6505808", "0.6505169", "0.647377", "0.64608...
0.0
-1
Load the draft results.
def test_load_draft(league): draft = league.draft_results() assert(len(draft) == 144) #mcdavid 1st assert(draft[0]['player_key'] == '396.p.6743') # carter hart 67th assert(draft[66]['player_key'] == '396.p.7156') # zadorov last assert(draft[-1]['player_key'] == '396.p.5995')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_enriched_results(self):\n return super().load_results()", "def load(self):\n self.results = pickle_load('results', self.main_dir)", "def run(self):\n results = self.fetch()\n return results", "def load_results_internal(self):\r\n filename = f\"{self.search_interna...
[ "0.6382324", "0.6246874", "0.5810889", "0.5807111", "0.57492447", "0.56064266", "0.5601259", "0.55607885", "0.54455495", "0.5442246", "0.5427821", "0.5390353", "0.53654075", "0.5347294", "0.53464603", "0.53339255", "0.53226596", "0.53156066", "0.5312531", "0.5308264", "0.5294...
0.6256367
1
Return dataframe of all free agents.
def test_get_free_agents_season_start(league, season_start_date): # equals all players minus drafted players # make sure none of the draft players in list free_agents = league.as_of(season_start_date).free_agents() drafted = league.draft_results(format='Pandas') assert(len(free_agents.index.intersection(drafted.index)) == 0), "Should be no drafted players as free agents" # could make sure all 'all_players' that weren't drafted are here
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def getFreeAgentRoles(self, ctx):\n server = ctx.message.server\n server_dict = self.get_server_dict(ctx)\n free_agent_dict = server_dict.setdefault(\"Free agent roles\", {})\n\n if(len(free_agent_dict.items()) > 0):\n for key, value in free_agent_dict.items():\n ...
[ "0.5804655", "0.5775972", "0.56420845", "0.55906963", "0.54590887", "0.54168296", "0.5397101", "0.5394799", "0.5385905", "0.5349997", "0.5243113", "0.5241917", "0.5230085", "0.5196623", "0.5092877", "0.5069075", "0.5068402", "0.50677747", "0.5053582", "0.5020693", "0.5011909"...
0.5439093
5
Return dataframe of all free agents.
def test_fantasy_status_nov_1(league): nov_1 = datetime.datetime(2019,11,1) players = league.as_of(nov_1).all_players() # make sure sammy blais is not a free agent, he was picked up oct 31 assert(players.loc[6544, 'fantasy_status'] != 'FA')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def getFreeAgentRoles(self, ctx):\n server = ctx.message.server\n server_dict = self.get_server_dict(ctx)\n free_agent_dict = server_dict.setdefault(\"Free agent roles\", {})\n\n if(len(free_agent_dict.items()) > 0):\n for key, value in free_agent_dict.items():\n ...
[ "0.5804655", "0.5775972", "0.56420845", "0.55906963", "0.54590887", "0.5439093", "0.54168296", "0.5397101", "0.5394799", "0.5385905", "0.5349997", "0.5243113", "0.5241917", "0.5230085", "0.5196623", "0.5092877", "0.5069075", "0.5068402", "0.50677747", "0.5053582", "0.5020693"...
0.0
-1
Return player on waivers for given time.
def test_get_waivers(league): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def player(self):\n return self.players[self.tictactoe.turn]", "def get_player(self,p):\n self._validate(p)\n return p.player()", "async def get_player(self) -> Optional[andesite.Player]:\n ...", "def get_player(self):\n return self.player", "def get_player(self):\n ...
[ "0.6203599", "0.61011946", "0.59679496", "0.59320474", "0.59320474", "0.5928937", "0.5890586", "0.5869932", "0.5858092", "0.58263284", "0.5819567", "0.5788687", "0.57525885", "0.573477", "0.57151794", "0.57138526", "0.56666094", "0.5606717", "0.5583962", "0.5573942", "0.55579...
0.0
-1
Return team roster at given date.
def get_team_roster(league): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def roster(\n self, ctx: commands.Context, season: Optional[YearFinder] = None, *, search: HockeyTeams\n ) -> None:\n season_str = None\n season_url = \"\"\n if season:\n if season.group(3):\n if (int(season.group(3)) - int(season.group(1))) > 1:\n ...
[ "0.6794363", "0.63804543", "0.5813007", "0.5741318", "0.57277983", "0.5647605", "0.5467505", "0.5346135", "0.5336544", "0.53275824", "0.53005236", "0.5272862", "0.5261342", "0.521931", "0.5192084", "0.514297", "0.51121926", "0.50987285", "0.5097064", "0.50702137", "0.50670063...
0.65519655
1
Detect which spins in each peak make up spin anchors.
def anchors(self): dims = self.dims anchors = [] for peak in self: possible_anchors = [] for combination in combinations(range(dims), 2): spins = [peak[i] for i in combination] if any(s.res_num is None or s.atom is None for s in spins): continue res_nums = [spin.res_num for spin in spins] atoms = [spin.atom for spin in spins] elements = [atom[0] for atom in atoms] positions = [atom[1:] for atom in atoms] same_res_num = res_nums[0] == res_nums[1] valid_pairs = [set(('H', 'N')), set(('H', 'C'))] is_proton_heavy_pair = set(elements) in valid_pairs same_position = all(c[0] == c[1] for c in zip(*positions)) if same_res_num and is_proton_heavy_pair and same_position: if '' in positions and set(elements) != set(('H', 'N')): # One of the atom names must have been 'H', 'N' or 'C' # Of these, only the amide proton anchor is valid continue if elements[0] == 'H': possible_anchors.append(combination) else: possible_anchors.append(combination[::-1]) if len(possible_anchors) > 1: pa_sets = [set(pa) for pa in possible_anchors] overlap = set.intersection(*pa_sets) if overlap: # Ambiguous, overlapping anchors continue for poss_anc in possible_anchors: if poss_anc not in anchors: anchors.append(poss_anc) anchors = tuple(anchors) return anchors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inPointing(self, pulsar):\n # initialise offset_deg to be a big old number\n # FWHM is in arcmin so always multiply by 60\n offset_deg = 5.\n\n # loop over pointings\n for point in self.pointingslist:\n # do a really basic check first\n\n glterm = (pulsa...
[ "0.595715", "0.59206724", "0.57927513", "0.57917154", "0.5787008", "0.5730863", "0.57080287", "0.5705941", "0.569645", "0.5671896", "0.56714284", "0.5667955", "0.5589922", "0.55761975", "0.55648017", "0.5513475", "0.55061954", "0.5487037", "0.547641", "0.54698986", "0.5455498...
0.5789159
4
Calibrate the chemical shifts of each spin in the peak list.
def calibrate_peaklist(peaklist, calibration, attr='shift'): if len(calibration) != peaklist.dims: raise ValueError('incorrect calibration list length') for peak in peaklist: for spin, cal in zip(peak, calibration): shift = getattr(spin, attr) shift -= cal setattr(spin, attr, shift) return peaklist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updatePeakShifts(peak):\n\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n updateResonShift(contrib.resonance,peakDim)", "def apply_calibration(self, cal):\n\n n_edges = len(self.channels) + 1\n channel_ed...
[ "0.61754125", "0.56453633", "0.5504699", "0.5399412", "0.5393503", "0.5272751", "0.5264761", "0.5158035", "0.5157666", "0.51486784", "0.51470107", "0.5123972", "0.51041424", "0.50907195", "0.5079858", "0.5070017", "0.5054352", "0.50522673", "0.5047045", "0.50407064", "0.50199...
0.6538406
0
Map each unique spin link to all of its corresponding peaks. NOESY peak lists represent spin links between Hydrogen atoms. Whether 2D, 3D or 4D, each peak in a NOESY peak list has exactly two Hydrogen spins. Here, a spin link is represented by a frozenset containing the spin.assignment tuples for each Hydrogen atom. This function returns a dictionary mapping each unique spin link to a list of the Peaks in the PeakList that contain those two Hydrogen atoms. Examples >>> spin_link_dict = peaklist.spin_link_dict() >>> spin_link, peaks = spin_link_dict.popitem() >>> spin_link frozenset([Assignment(res_type='Q', res_num=21, atom='HN'), Assignment( res_type='G', res_num=17, atom='HN')]) >>> print(peaks[0]) Peak(spins=[ Spin(res_type=G, res_num=17, atom=HN), Spin(res_type=G, res_num=17, atom=N), Spin(res_type=Q, res_num=21, atom=HN)]) >>> print(peaks[1]) Peak(spins=[ Spin(res_type=Q, res_num=21, atom=HN), Spin(res_type=Q, res_num=21, atom=N), Spin(res_type=G, res_num=17, atom=HN)]) Returns
def get_spin_link_dict(peaklist): spin_link_dict = {} for peak in peaklist: spins = [spin for spin in peak if spin.atom is not None and spin.atom[0] == 'H'] if len(spins) != 2: err = ('expected 2 Hydrogens in each peak, ' 'found %d' % len(spins)) raise ValueError(err) link = frozenset(spin.assignment for spin in spins) spin_link_dict.setdefault(link, []).append(peak) return spin_link_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mod_map(mods, plinkmap):\n modmap = {}\n for chrom in plinkmap:\n if chrom not in modmap:\n modmap[chrom] = []\n markers = plinkmap[chrom]\n modif = mods[chrom]\n for i, m in enumerate(modif):\n if m == 'I':\n p2 = float(markers[i+1][3])\n ...
[ "0.4860738", "0.4834604", "0.48159462", "0.4767646", "0.47199568", "0.46659744", "0.46064976", "0.4587481", "0.4538893", "0.45380697", "0.45124298", "0.44684395", "0.44490376", "0.4414482", "0.4384815", "0.43777874", "0.4372807", "0.43616307", "0.43376932", "0.43176925", "0.4...
0.7973843
0
Sort peaks by the assignments of their constituent spins. Sort the peaks by the assignments of spins in particular dimensions. The default order sorts the peaks by the dimensions associated with spin anchors first then by the remaining dimensions in the order they appear in each peak. Optionally place all commented peaks at the end of the peak list.
def sort_by_assignments(peaklist, order=None, commented_at_end=False): anchors = peaklist.anchors anchored = tuple(i for anchor in anchors for i in anchor) unanchored = set(range(peaklist.dims)) - set(anchored) default_order = anchored + tuple(sorted(unanchored)) order = order if order is not None else default_order peaklist.sort(key=lambda peak: tuple(peak[i] for i in order)) if commented_at_end: peaklist.sort(key=lambda peak: peak.commented) return peaklist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sort_cubelist(self, cubelist):\n sorted_cubelist = []\n realization_num = 1\n cubelist = cubelist.merge(unique=False)\n for cube in cubelist:\n # If time is a scalar coordinate, promote it to a dimension \n # coordinate, this is because all cubes must have the...
[ "0.5235512", "0.51029795", "0.5057203", "0.50256133", "0.4997207", "0.49645668", "0.49587327", "0.49490315", "0.4915132", "0.4911893", "0.48918042", "0.4875538", "0.4869072", "0.48368287", "0.4831977", "0.48246452", "0.48140344", "0.47824505", "0.47735858", "0.4757741", "0.47...
0.73416525
0
Return an shellescaped version of the string using single quotes. Reliably quote a string which may contain unsafe characters (e.g. space, quote, or other special characters such as '$'). The returned value can be used in a shell command line as one token that gets to be interpreted literally.
def SingleQuote(s): return pipes.quote(s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shlex_quote(s):\n if not s:\n return \"''\"\n # PKGW: builtin not available in Python 2\n ###if _find_unsafe(s) is None:\n ### return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"...
[ "0.7403372", "0.74031615", "0.71584487", "0.71584487", "0.7133629", "0.71020097", "0.7097107", "0.7079384", "0.6891607", "0.683838", "0.6683751", "0.66462994", "0.6627006", "0.6565741", "0.6501744", "0.64522266", "0.63975513", "0.6358231", "0.63522434", "0.6346998", "0.632059...
0.7261007
2
Return an shellescaped version of the string using double quotes. Reliably quote a string which may contain unsafe characters (e.g. space or quote characters), while retaining some shell features such as variable interpolation. The returned value can be used in a shell command line as one token that gets to be further interpreted by the shell. The set of characters that retain their special meaning may depend on the
def DoubleQuote(s): if not s: return '""' elif all(c in _SafeShellChars for c in s): return s else: return '"' + s.replace('"', '\\"') + '"'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shlex_quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def shlex_quote(s)...
[ "0.7271046", "0.7253869", "0.72336555", "0.7229503", "0.70458126", "0.6944718", "0.6929941", "0.69232047", "0.69232047", "0.6779495", "0.67524135", "0.6597111", "0.65538913", "0.6536574", "0.65330154", "0.65087634", "0.6507119", "0.64975196", "0.64822", "0.646473", "0.644913"...
0.7298411
0
Constructs a shell snippet for a command using a variable to shrink it. Takes into account all quoting that needs to happen.
def ShrinkToSnippet(cmd_parts, var_name, var_value): def shrink(value): parts = (x and SingleQuote(x) for x in value.split(var_value)) with_substitutions = ('"$%s"' % var_name).join(parts) return with_substitutions or "''" return ' '.join(shrink(part) for part in cmd_parts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def substshell(command, path=None, output=os.devnull, mode='w'):\n _compile = SubstCommandCompiler(path)\n _compile.init_command(command)\n return functools.partial(_compile, output, mode)", "def make_shell_cmd(self, locals):\n\t\tdef cmd_shell():\n\t\t\timport code\n\t\t\tcode.interact(banner=self.shel...
[ "0.5757063", "0.53830165", "0.53762203", "0.5325956", "0.53113496", "0.5278688", "0.518739", "0.5154804", "0.51409817", "0.5108147", "0.50834435", "0.50418144", "0.5036694", "0.49932173", "0.49862692", "0.4973778", "0.49597052", "0.49114555", "0.49044985", "0.4899245", "0.489...
0.7213392
0
Opens a subprocess to execute a program and returns its return value.
def RunCmd(args, cwd=None): logger.debug(str(args) + ' ' + (cwd or '')) return Call(args, cwd=cwd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runProgram(cmd):\n try:\n p=subprocess.Popen(cmd,shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n (stdout, stderr) = p.communicate()\n except Exception as e:\n print(\"Error running program because: {0}\".format(e), file=errorOutput)\n return None\n else:\n ...
[ "0.7069853", "0.70137537", "0.6977988", "0.69574434", "0.69218326", "0.6837311", "0.68356645", "0.678155", "0.6761188", "0.6706355", "0.6686737", "0.66564906", "0.6626361", "0.66172713", "0.65875185", "0.65428317", "0.6520404", "0.6509777", "0.6488528", "0.6487521", "0.648116...
0.0
-1
Open a subprocess to execute a program and returns its output.
def GetCmdOutput(args, cwd=None, shell=False, env=None): (_, output) = GetCmdStatusAndOutput(args, cwd, shell, env) return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_subprocess(command):\n if verbose:\n print \"Running \" + str(command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if verbose:\n print \"Output: \" + output\n\n if proc.returncode != 0:\n raise C...
[ "0.73115253", "0.7231892", "0.7162937", "0.7034998", "0.69283974", "0.6902304", "0.6880463", "0.68566847", "0.67947245", "0.6765262", "0.6710852", "0.66885346", "0.66741914", "0.6659031", "0.66580534", "0.6635002", "0.66338694", "0.6633255", "0.65943277", "0.6593683", "0.6587...
0.0
-1
Executes a subprocess and returns its exit code and output.
def GetCmdStatusAndOutput(args, cwd=None, shell=False, env=None, merge_stderr=False): status, stdout, stderr = GetCmdStatusOutputAndError( args, cwd=cwd, shell=shell, env=env, merge_stderr=merge_stderr) if stderr: logger.critical('STDERR: %s', stderr) logger.debug('STDOUT: %s%s', stdout[:4096].rstrip(), '<truncated>' if len(stdout) > 4096 else '') return (status, stdout)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(cmd):\n\n process = subprocess.Popen(cmd,\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out = ''\n err = ''\n exitcode = 0\n...
[ "0.75895786", "0.7547545", "0.7526934", "0.7474151", "0.7464935", "0.73460895", "0.72808135", "0.723191", "0.7178856", "0.71782106", "0.71392924", "0.71354", "0.7107621", "0.70249635", "0.6990342", "0.6971521", "0.6964739", "0.69470686", "0.6945275", "0.69408506", "0.69314843...
0.0
-1
Starts a subprocess and returns a handle to the process.
def StartCmd(args, cwd=None, shell=False, env=None): _ValidateAndLogCommand(args, cwd, shell) return Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, cwd=cwd, env=env)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None", "def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)", "def start_process():\n global co...
[ "0.76355433", "0.7024509", "0.6961524", "0.6918989", "0.6882473", "0.68161005", "0.68090093", "0.6789304", "0.6741872", "0.6730431", "0.66940457", "0.66635656", "0.6652612", "0.66523474", "0.66228956", "0.64822423", "0.642592", "0.64208853", "0.63632655", "0.6342233", "0.6317...
0.68733007
5
Executes a subprocess and returns its exit code, output, and errors.
def GetCmdStatusOutputAndError(args, cwd=None, shell=False, env=None, merge_stderr=False): _ValidateAndLogCommand(args, cwd, shell) stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE pipe = Popen( args, stdout=subprocess.PIPE, stderr=stderr, shell=shell, cwd=cwd, env=env) stdout, stderr = pipe.communicate() return (pipe.returncode, stdout, stderr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_subprocess(command):\n if verbose:\n print \"Running \" + str(command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if verbose:\n print \"Output: \" + output\n\n if proc.returncode != 0:\n raise C...
[ "0.7698771", "0.76585543", "0.7596537", "0.75804675", "0.75774103", "0.75148445", "0.7452843", "0.73803294", "0.72769535", "0.72733176", "0.7248266", "0.723106", "0.7226016", "0.7157534", "0.71220666", "0.711173", "0.7100731", "0.707238", "0.70716333", "0.70658004", "0.706432...
0.65505224
77
An fcntlbased implementation of _IterProcessStdout.
def _IterProcessStdoutFcntl(process, iter_timeout=None, timeout=None, buffer_size=4096, poll_interval=1): # pylint: disable=too-many-nested-blocks import fcntl try: # Enable non-blocking reads from the child's stdout. child_fd = process.stdout.fileno() fl = fcntl.fcntl(child_fd, fcntl.F_GETFL) fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) end_time = (time.time() + timeout) if timeout else None iter_end_time = (time.time() + iter_timeout) if iter_timeout else None while True: if end_time and time.time() > end_time: raise TimeoutError() if iter_end_time and time.time() > iter_end_time: yield None iter_end_time = time.time() + iter_timeout if iter_end_time: iter_aware_poll_interval = min(poll_interval, max(0, iter_end_time - time.time())) else: iter_aware_poll_interval = poll_interval read_fds, _, _ = select.select([child_fd], [], [], iter_aware_poll_interval) if child_fd in read_fds: data = _read_and_decode(child_fd, buffer_size) if not data: break yield data if process.poll() is not None: # If process is closed, keep checking for output data (because of timing # issues). while True: read_fds, _, _ = select.select([child_fd], [], [], iter_aware_poll_interval) if child_fd in read_fds: data = _read_and_decode(child_fd, buffer_size) if data: yield data continue break break finally: try: if process.returncode is None: # Make sure the process doesn't stick around if we fail with an # exception. process.kill() except OSError: pass process.wait()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_q...
[ "0.6449587", "0.6135046", "0.60871077", "0.595699", "0.594129", "0.58840406", "0.5684317", "0.5682602", "0.5677694", "0.5666196", "0.565126", "0.55647177", "0.5556993", "0.55479926", "0.54802346", "0.5458966", "0.5452489", "0.54261523", "0.5415573", "0.539853", "0.53972363", ...
0.67283905
0
A Queue.Queuebased implementation of _IterProcessStdout.
def _IterProcessStdoutQueue(process, iter_timeout=None, timeout=None, buffer_size=4096, poll_interval=1): # pylint: disable=unused-argument if six.PY3: import queue else: import Queue as queue import threading stdout_queue = queue.Queue() def read_process_stdout(): # TODO(jbudorick): Pick an appropriate read size here. while True: try: output_chunk = _read_and_decode(process.stdout.fileno(), buffer_size) except IOError: break stdout_queue.put(output_chunk, True) if not output_chunk and process.poll() is not None: break reader_thread = threading.Thread(target=read_process_stdout) reader_thread.start() end_time = (time.time() + timeout) if timeout else None try: while True: if end_time and time.time() > end_time: raise TimeoutError() try: s = stdout_queue.get(True, iter_timeout) if not s: break yield s except queue.Empty: yield None finally: try: if process.returncode is None: # Make sure the process doesn't stick around if we fail with an # exception. process.kill() except OSError: pass process.wait() reader_thread.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_q...
[ "0.7584206", "0.74641174", "0.68386096", "0.6770728", "0.6437304", "0.63062155", "0.6296086", "0.62914383", "0.6253849", "0.62142974", "0.62142974", "0.62142974", "0.62142974", "0.62142974", "0.62142974", "0.61727333", "0.61600655", "0.61525756", "0.61478114", "0.6145392", "0...
0.7113257
2
Executes a subprocess with a timeout.
def GetCmdStatusAndOutputWithTimeout(args, timeout, cwd=None, shell=False, logfile=None, env=None): _ValidateAndLogCommand(args, cwd, shell) output = six.StringIO() process = Popen( args, cwd=cwd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) try: for data in _IterProcessStdout(process, timeout=timeout): if logfile: logfile.write(data) output.write(data) except TimeoutError: raise TimeoutError(output.getvalue()) str_output = output.getvalue() logger.debug('STDOUT+STDERR: %s%s', str_output[:4096].rstrip(), '<truncated>' if len(str_output) > 4096 else '') return process.returncode, str_output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call_timeout(args, **kwargs):\n\n timeout = kwargs.pop('timeout', 20)\n assert timeout > 0\n\n endtime = time.time() + timeout\n\n proc = subprocess.Popen(args, **kwargs)\n \n try:\n while time.time() < endtime:\n if proc.poll() != None:\n return proc.returnco...
[ "0.7861095", "0.74641967", "0.72873646", "0.7224932", "0.72131467", "0.7131527", "0.7093006", "0.70907867", "0.7079524", "0.70083195", "0.6727389", "0.65759695", "0.65237683", "0.6510072", "0.6489184", "0.6449738", "0.64226705", "0.63744366", "0.63308406", "0.6277528", "0.627...
0.566922
57
Executes a subprocess and continuously yields lines from its output.
def IterCmdOutputLines(args, iter_timeout=None, timeout=None, cwd=None, shell=False, env=None, check_status=True): cmd = _ValidateAndLogCommand(args, cwd, shell) process = Popen( args, cwd=cwd, shell=shell, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return _IterCmdOutputLines( process, cmd, iter_timeout=iter_timeout, timeout=timeout, check_status=check_status)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_command(self):\n\n while True:\n current_line = self.process.stdout.readline().rstrip()\n\n if not current_line:\n break\n\n yield self.decode_output(current_line)", "def runCommand(command):\n p = subprocess.Popen(command, std...
[ "0.74790484", "0.7069593", "0.69885415", "0.6885744", "0.6884901", "0.68284523", "0.6759169", "0.67222494", "0.65148145", "0.64916754", "0.63683236", "0.63626593", "0.6347323", "0.62500584", "0.6244789", "0.6235298", "0.6210111", "0.62086326", "0.6171837", "0.61539686", "0.61...
0.6275099
13
Create an AutoEncoder object.
def __init__(self, n_inpt, n_hidden, hidden_transfer='identity', out_transfer='identity', loss='squared', tied_weights=True, batch_size=None, optimizer='lbfgs', max_iter=1000, verbose=False): super(AutoEncoder, self).__init__( n_inpt, n_hidden, hidden_transfer, out_transfer, loss, tied_weights) self.batch_size = batch_size self.optimizer = optimizer self.f_transform = None self.f_reconstruct = None self.parameters.data[:] = np.random.standard_normal( self.parameters.data.shape).astype(theano.config.floatX) self.max_iter = max_iter self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_encoder(self):\n raise NotImplementedError", "def build_autoencoder(input_dim):\r\n input_layer = Input(shape=(input_dim, 1))\r\n enc = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(input_layer)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = C...
[ "0.62622386", "0.6247434", "0.62234795", "0.6113021", "0.59711206", "0.59430516", "0.5825264", "0.5779037", "0.56837624", "0.56632996", "0.56139284", "0.5593676", "0.5560463", "0.55399996", "0.55361104", "0.5501795", "0.54680336", "0.54680336", "0.5461032", "0.54492253", "0.5...
0.5244221
29
Create a SparseAutoEncoder object.
def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid', out_transfer='identity', reconstruct_loss='squared', c_sparsity=1, sparsity_loss='bern_bern_kl', sparsity_target=0.01, tied_weights=True, batch_size=None, optimizer='lbfgs', max_iter=1000, verbose=False): super(SparseAutoEncoder, self).__init__( n_inpt, n_hidden, hidden_transfer, out_transfer, reconstruct_loss, c_sparsity, sparsity_loss, sparsity_target, tied_weights) self.batch_size = batch_size self.optimizer = optimizer self.f_transform = None self.f_reconstruct = None self.parameters.data[:] = np.random.standard_normal( self.parameters.data.shape).astype(theano.config.floatX) self.max_iter = max_iter self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_sparseDB():\n datas = data.Kmercount_to_matrix()\n datas.run()\n print('***Sparse matrix created***')", "def make_sparse(data):\n assert data.train_pos_edge_index is not None\n\n (row, col), N = data.train_pos_edge_index, data.num_nodes\n perm = (col * N + row).argsort()\n row, co...
[ "0.63621956", "0.62124324", "0.60858965", "0.59326434", "0.5876335", "0.5779511", "0.576974", "0.5758959", "0.5684206", "0.56325567", "0.56308913", "0.5587562", "0.55547976", "0.5466962", "0.5456364", "0.52503127", "0.5241987", "0.5203801", "0.5184267", "0.51695746", "0.51491...
0.63101643
1
Create a ContractiveAutoEncoder object.
def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid', out_transfer='identity', reconstruct_loss='squared', c_jacobian=1, tied_weights=True, batch_size=None, optimizer='lbfgs', max_iter=1000, verbose=False): super(ContractiveAutoEncoder, self).__init__( n_inpt, n_hidden, hidden_transfer, out_transfer, reconstruct_loss, c_jacobian, tied_weights) self.batch_size = batch_size self.optimizer = optimizer self.f_transform = None self.f_reconstruct = None self.parameters.data[:] = np.random.standard_normal( self.parameters.data.shape).astype(theano.config.floatX) self.max_iter = max_iter self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _construct_ae(self):\n if self.joint_train:\n self.critic.trainable = False\n autoencoder = Model(self.encoder.input,\n [self.decoder(self.encoder.output),\n self.critic(self.encoder.output)])\n autoencoder.c...
[ "0.59766114", "0.5971633", "0.58636886", "0.57971674", "0.565155", "0.5615126", "0.54221565", "0.53008664", "0.52868307", "0.5247722", "0.524635", "0.52189344", "0.52125883", "0.51889825", "0.51658624", "0.51599807", "0.5148567", "0.5141506", "0.51331234", "0.511349", "0.5087...
0.5832884
3
Create a DenoisingAutoEncoder object.
def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid', out_transfer='identity', reconstruct_loss='squared', noise_type='gauss', c_noise=.2, tied_weights=True, batch_size=None, optimizer='lbfgs', max_iter=1000, verbose=False): super(DenoisingAutoEncoder, self).__init__( n_inpt, n_hidden, hidden_transfer, out_transfer, reconstruct_loss, noise_type, c_noise, tied_weights) self.batch_size = batch_size self.optimizer = optimizer self.f_transform = None self.f_reconstruct = None climin.initialize.randomize_normal(self.parameters.data) self.max_iter = max_iter self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_encoder(self):\n raise NotImplementedError", "def build_autoencoder(input_dim):\r\n input_layer = Input(shape=(input_dim, 1))\r\n enc = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(input_layer)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = C...
[ "0.6016746", "0.55889976", "0.545366", "0.54502213", "0.5415948", "0.54003376", "0.54001844", "0.53689516", "0.534991", "0.5316329", "0.5203007", "0.51553774", "0.5107471", "0.5106341", "0.5100852", "0.50989753", "0.5094069", "0.5073897", "0.50694823", "0.5016635", "0.5007310...
0.6117516
0
restrict to the content's language
def test_languages(self, client): root = Node.root() en = Type1(title="Hello World", state="published", node=root, language="en").save() nl = Type1(title="Hoi Wereld", state="published", node=root, language="nl").save() enspoke = en.spoke() feed = WheelFeed(enspoke) assert en.content_ptr in feed.items() assert nl.content_ptr not in feed.items()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_forced(self, lang):\r\n return False", "def process_request(self, request):\n\n if request.COOKIES.get('site_language'):\n if request.COOKIES['site_language'] == '':\n language = 'fr'\n else:\n language = request.COOKIES['site_language']\n ...
[ "0.6518715", "0.6337609", "0.62430346", "0.61772054", "0.6115925", "0.6033609", "0.60031694", "0.60027224", "0.59728533", "0.59191805", "0.5822976", "0.5796649", "0.5761532", "0.5755782", "0.5725612", "0.57049775", "0.5696995", "0.56784564", "0.56742376", "0.5659563", "0.5643...
0.52251047
81
Eliminate duplicates in a sorted list. Returns a new sorted list with the same elements in list1, but with no duplicates. This function can be iterative.
def remove_duplicates(list1): tample = [float('inf')] for elem in list1: if elem in tample: continue tample.append(elem) return tample[1:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_duplicates(list1):\n temp_list = []\n\n # for each element in list1 check if is already in the temp list\n # if it is not yet there, copy it.\n for item in list1:\n if item not in temp_list:\n temp_list.append(item)\n\n return temp_list", "def remove_duplicates(list1):...
[ "0.82367706", "0.8172117", "0.8020477", "0.7850647", "0.76956123", "0.76429844", "0.7602725", "0.750563", "0.74992764", "0.742413", "0.73368996", "0.7323583", "0.7320335", "0.7309752", "0.7279414", "0.7272679", "0.72361135", "0.72262025", "0.721125", "0.718744", "0.7170773", ...
0.8031551
2
Compute the intersection of two sorted lists. Returns a new sorted list containing only elements that are in both list1 and list2. This function can be iterative.
def intersect(list1, list2): intersection_list = [] list1_idx = 0 list2_idx = 0 while list2_idx < len(list2) and list1_idx < len(list1): if list2[list2_idx] == list1[list1_idx]: intersection_list.append(list2[list2_idx]) list1_idx += 1 list2_idx += 1 elif list2[list2_idx] > list1[list1_idx]: list1_idx += 1 else: list2_idx += 1 return intersection_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intersection(list1, list2):\n list3 = [value for value in list1 if value in list2]\n return list3", "def intersect(list1, list2):\r\n if len(list1) == 0 or len(list2) == 0:\r\n return []\r\n else:\r\n if list1[0] == list2[0]:\r\n new_list = list([list1[0]])\r\n ...
[ "0.81946236", "0.8157978", "0.8081259", "0.802535", "0.79895693", "0.7963669", "0.79506075", "0.7948855", "0.7923734", "0.7879321", "0.7839831", "0.7740573", "0.76637924", "0.76356244", "0.76244", "0.75989366", "0.7588414", "0.75148344", "0.75115436", "0.74970204", "0.7495804...
0.8113677
2
Merge two sorted lists. Returns a new sorted list containing those elements that are in either list1 or list2. This function can be iterative.
def merge(list1, list2): res = [] index_i, index_j = 0, 0 while index_i < len(list1) and index_j < len(list2): if list1[index_i] <= list2[index_j]: res.append(list1[index_i]) index_i += 1 else: res.append(list2[index_j]) index_j += 1 res += list1[index_i:] res += list2[index_j:] return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_lists(list_1, list_2):\n if len(list_1) == 0:\n return list_2\n if len(list_2) == 0:\n return list_1\n\n new_list = []\n length = len(list_1) + len(list_2)\n while len(new_list) < length:\n if len(list_1) == 0:\n new_list = new_list + list_2\n elif le...
[ "0.81090695", "0.80801195", "0.8072913", "0.80316395", "0.8031528", "0.8013592", "0.8011788", "0.7974852", "0.795297", "0.7931757", "0.7878907", "0.7860104", "0.7858302", "0.78444135", "0.78015894", "0.7787144", "0.7747671", "0.7741996", "0.7730882", "0.76854265", "0.76350236...
0.7963335
8
Sort the elements of list1. Return a new sorted list with the same elements as list1. This function should be recursive.
def merge_sort(list1): if len(list1) <= 1: return list1 left = merge_sort(list1[:len(list1)/2]) right = merge_sort(list1[len(list1)/2:]) return merge(left, right)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_sort(in_list1: list) -> list:\n if in_list1 is None:\n return []\n if len(in_list1) == 1:\n return [in_list1[0]]\n _list1,_list2= in_list1[:int(((len(in_list1)+1)/2))],\\\n in_list1[int(((len(in_list1)+1)/2)):]\n _ordered_list1 = merge_sort(_list1)\n _o...
[ "0.76767325", "0.732384", "0.7290804", "0.7187142", "0.7152374", "0.71103555", "0.7107798", "0.7064439", "0.6912521", "0.6830091", "0.6827197", "0.67525494", "0.66716707", "0.6565023", "0.6531662", "0.6518769", "0.65044874", "0.64856726", "0.6469198", "0.6412194", "0.6409195"...
0.70278823
8
Generate all strings that can be composed from the letters in word in any order. Returns a list of all strings that can be formed from the letters in word. This function should be recursive.
def gen_all_strings(word): if word == '': return [''] else: first = word[0] rest = word[1:] rest_strings = gen_all_strings(rest) all_words = [] for string in rest_strings: for leter in range(len(string)+1): all_words.append(string[0:leter]+first+string[leter:]) return rest_strings + all_words
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_all_strings(word):\n if not word:\n return [\"\"]\n \n all_strings = []\n for string in gen_all_strings(word[1:]):\n for letter_idx in range(len(string) + 1):\n all_strings.append(string[letter_idx:] + word[0] + string[:letter_idx])\n \n return gen_all_strings(wor...
[ "0.8034503", "0.7417961", "0.7359351", "0.7112968", "0.7106626", "0.68921024", "0.6748245", "0.6620748", "0.64630806", "0.63716835", "0.6366732", "0.6345676", "0.63174015", "0.6314541", "0.62938344", "0.6261687", "0.62095034", "0.6180001", "0.61566484", "0.61404824", "0.61245...
0.7376517
2
Load word list from the file named filename. Returns a list of strings.
def load_words(filename): url = codeskulptor.file2url(filename) netfile = urllib2.urlopen( "http://codeskulptor-assets.commondatastorage.googleapis.com/assets_scrabble_words3.txt") words_file = netfile.readlines() words = [word[:-2] for word in words_file] return words
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_wordlist(filename):\n with open(filename) as f:\n \tdata = f.read().splitlines()\n return data", "def loadWords():\n inFile = open(wordFile, 'r')\n wordlist = []\n for line in inFile:\n wordlist.append(line)\n return wordlist", "def loadWords():\n # inFile: file\n inF...
[ "0.8982847", "0.85380477", "0.85197324", "0.84018725", "0.8372622", "0.8360392", "0.8360392", "0.8360392", "0.83501935", "0.83436346", "0.83436346", "0.8328561", "0.8309319", "0.82969534", "0.82897973", "0.82897973", "0.8267962", "0.8260606", "0.8249803", "0.8249803", "0.8249...
0.812224
43
Fixture for setting up configuration parser
def setup_config(): config = configparser.ConfigParser() config.read(CONFIG_PATH) return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.parser = create_parser()", "def setup(self):\n file_under_test = os.path.join(os.curdir, 'application-core',\n 'app.core.config.xml')\n with open(file_under_test) as f:\n config = f.read()\n self.config = objecti...
[ "0.70605105", "0.7017709", "0.7011017", "0.692149", "0.692149", "0.690285", "0.68758297", "0.68144774", "0.6782708", "0.6702887", "0.66660374", "0.6653196", "0.66530657", "0.6643987", "0.6641886", "0.6610034", "0.6574361", "0.65678746", "0.6562894", "0.6540612", "0.6537637", ...
0.7371306
0
Fixture for retrieving mock event
def get_mock_event(): event = { "httpMethod": "GET", "//body": "{\"name\": \"Sam\"}", "resource": "/{proxy+}", "queryStringParameters": {}, "pathParameters": { "proxy": "users" }, "requestContext": { "accountId": "222222222", "identity": { "sourceIp": "2a02:a445:6d36:1:1e3:a188:313c:1d31", "userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_1_6) " "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2743.116 Safari/537.36", }, "resourcePath": "/{proxy+}", "httpMethod": "GET", "apiId": "xxxxxxxxxx" } } return event
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def event_factory_fixture():\n def _factory(device_id, event_type=\"DEVICE_EVENT\", capability='',\n attribute='Updated', value='Value', data=None):\n event = Mock()\n event.event_type = event_type\n event.device_id = device_id\n event.component_id = 'main'\n e...
[ "0.74541396", "0.724666", "0.72374237", "0.71662945", "0.7073701", "0.7027783", "0.68203604", "0.68195695", "0.680202", "0.6784378", "0.6762648", "0.67611665", "0.66722715", "0.66431737", "0.6619424", "0.65345937", "0.65251654", "0.65242076", "0.65172464", "0.6512973", "0.650...
0.74155563
1
Unit test get_ip_type_by_address method of the Bad Bots class
def test_get_ip_type_by_address(setup_config, get_mock_event): # !ARRANGE! bad_bots = BadBots(setup_config, get_mock_event) ipv4_address_1 = '1.1.1.1' ipv4_address_2 = '11.22.33.44' ipv4_address_3 = '123.123.123.123' ipv6_address_1 = '2a02:a445:6d36:1:1e3:a188:313c:1d31' ipv6_address_2 = '3731:54:65fe:2::a7' ipv6_address_3 = 'fd07:a47c:3742:823e:3b02:76:982b:463' # !ACT! # Detect the IP type of provided IP addresses ipv4_address_1_type = bad_bots.get_ip_type_by_address(ipv4_address_1) ipv4_address_2_type = bad_bots.get_ip_type_by_address(ipv4_address_2) ipv4_address_3_type = bad_bots.get_ip_type_by_address(ipv4_address_3) ipv6_address_1_type = bad_bots.get_ip_type_by_address(ipv6_address_1) ipv6_address_2_type = bad_bots.get_ip_type_by_address(ipv6_address_2) ipv6_address_3_type = bad_bots.get_ip_type_by_address(ipv6_address_3) # !ASSERT! # Assert IP addresses are of type IPv4 assert ipv4_address_1_type.value == BadBots.SourceIPType.IPV4.value assert ipv4_address_2_type.value == BadBots.SourceIPType.IPV4.value assert ipv4_address_3_type.value == BadBots.SourceIPType.IPV4.value # Assert IP addresses are of type IPv6 assert ipv6_address_1_type.value == BadBots.SourceIPType.IPV6.value assert ipv6_address_2_type.value == BadBots.SourceIPType.IPV6.value assert ipv6_address_3_type.value == BadBots.SourceIPType.IPV6.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ip_addresses_exists():\n load_ips()\n validate_names()", "def test_external_ip_get_kind(self):\n assert_equal(self.test_external_ip.get_kind(), 'mpexternalip')", "def test_ipam_ip_addresses_read(self):\n pass", "def test_get_source_ip(self):\n pass", "def test_ip(self):\n ...
[ "0.67973435", "0.6795078", "0.67154825", "0.66857165", "0.66440994", "0.6609387", "0.65462726", "0.6507168", "0.649237", "0.6404722", "0.63306016", "0.6302462", "0.6274524", "0.6213682", "0.6204644", "0.62016076", "0.61814064", "0.6152368", "0.6097284", "0.60947865", "0.60787...
0.78166175
0
Unit test check_bot_confidence method of the Bad Bots class
def test_check_bot_confidence(setup_config, get_mock_event): # !ARRANGE! bad_bots = BadBots(setup_config, get_mock_event) bot_1 = Bot() bot_1.source_ip = '1.1.1.1' bot_1.http_query_string_parameters = '<script></script>' bot_1.http_body = 'EXEC' bot_1.geolocation = 'United States' bot_1.source_ip_type = BadBots.SourceIPType.IPV4 bot_1.http_method = "CONNECT" bot_1.http_user_agent = "Mozilla/5.0 (compatible; Sosospider/2.0; +http://help.soso.com/webspider.htm)" bot_2 = Bot() bot_2.source_ip = '77.168.51.231' bot_2.http_query_string_parameters = 'hello' bot_2.http_body = 'hello!' bot_2.geolocation = 'Netherlands' bot_2.source_ip_type = BadBots.SourceIPType.IPV4 bot_2.http_method = "GET" bot_2.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36" bot_3 = Bot() bot_3.source_ip = '2a02:a445:6d36:1:1e3:a188:313c:1d33' bot_3.http_query_string_parameters = 'param=true' bot_3.http_body = 'username=xxx' bot_3.geolocation = 'United States' bot_3.source_ip_type = BadBots.SourceIPType.IPV6 bot_3.http_method = "GET" bot_3.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36" # !ACT! # Do confidence check on potential bots confidence_score_bot_1 = bad_bots.check_bot_confidence(bot_1) confidence_score_bot_2 = bad_bots.check_bot_confidence(bot_2) confidence_score_bot_3 = bad_bots.check_bot_confidence(bot_3) # !ASSERT! # Assert IP addresses are of type IPv4 assert(confidence_score_bot_1 == 25) assert(confidence_score_bot_2 == 0) assert(confidence_score_bot_3 == 5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_word_confidences(self):\n self._api.SetImageFile(self._image_file)\n words = self._api.AllWords()\n self.assertEqual(words, [])\n self._api.Recognize()\n words = self._api.AllWords()\n confidences = self._api.AllWordConfidences()\n self.assertEqual(len(word...
[ "0.67971265", "0.6142417", "0.61158496", "0.6029153", "0.57504106", "0.572192", "0.56564856", "0.5608764", "0.5544521", "0.554337", "0.55383706", "0.55253214", "0.54930353", "0.5490882", "0.5488589", "0.54793096", "0.5459387", "0.5425729", "0.540067", "0.5386406", "0.5373596"...
0.7511095
0
Generates IDL files from a template for user and system marshaling.
def _Main(): cmd_parser = argparse.ArgumentParser( description='Tool to generate IDL from template.') cmd_parser.add_argument('--idl_template_file', dest='idl_template_file', type=str, required=True, help='Input IDL template file.') cmd_parser.add_argument('--idl_output_file', type=str, required=True, help='Output IDL file.') flags = cmd_parser.parse_args() _GenerateIDLFile(flags.idl_template_file, flags.idl_output_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildAutogenContents(self):\n if len(self.mTemplates) == 0:\n return None\n \n content = \"/** Autogenerated temporary file for template instantiation. */\\n\"\n for t in self.mTemplates:\n template_type = t.mTemplateType\n typedef_name = t.mTypedefName\n con...
[ "0.60924524", "0.5882521", "0.5875414", "0.5807833", "0.57298577", "0.5695035", "0.56720126", "0.55950695", "0.55856544", "0.55830556", "0.557712", "0.55616045", "0.555261", "0.5524907", "0.5512638", "0.5499624", "0.5496154", "0.5461171", "0.54406464", "0.54035246", "0.540047...
0.6245206
0
add rankig to each node using google pagerank algorithm
def add_pagerank(self): query = ''' MATCH (c1:)-[r:INTERACTS]->(c2:) RETURN c1.name, c2.name, r.weight AS weight ''' ig = IGraph.TupleList(self.graph.run(query), weights=True) pg = ig.pagerank() pgvs = [] for p in zip(ig.vs, pg): print(p) pgvs.append({"name": p[0]["name"], "pg": p[1]}) write_clusters_query = ''' UNWIND {nodes} AS n MATCH (c:) WHERE c.name = n.name SET c.pagerank = n.pg ''' self.graph.run(write_clusters_query, nodes=pgvs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_rank(self):\n self.__rank += 1", "def rank():\n return 0", "def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_sco...
[ "0.6782681", "0.6664084", "0.6492858", "0.64766073", "0.6452059", "0.6438518", "0.64112085", "0.6404493", "0.62282807", "0.6224711", "0.62225074", "0.61820155", "0.6177658", "0.61345315", "0.61322975", "0.6032352", "0.6016183", "0.59991485", "0.59905785", "0.5985397", "0.5966...
0.75397
0
add community membership to each node using walktrap algorithm implemented in igraph
def add_communites(self): query = ''' MATCH (c1:)-[r:INTERACTS]->(c2:) RETURN c1.name, c2.name, r.weight AS weight ''' ig = IGraph.TupleList(self.graph.run(query), weights=True) clusters = IGraph.community_walktrap(ig, weights="weight").as_clustering() nodes = [{"name": node["name"]} for node in ig.vs] for node in nodes: idx = ig.vs.find(name=node["name"]).index node["community"] = clusters.membership[idx] write_clusters_query = ''' UNWIND {nodes} AS n MATCH (c:) WHERE c.name = n.name SET c.community = toInt(n.community) ''' self.graph.run(write_clusters_query, nodes=nodes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assign_communities(graph):\n communities = nx.algorithms.community\\\n .greedy_modularity_communities(nx.Graph(graph))\n for node in graph.nodes:\n graph.nodes[node]['community'] = [i for i,c in enumerate(communities)\n ...
[ "0.5967038", "0.5503575", "0.54628575", "0.5436496", "0.54320776", "0.54097146", "0.53649384", "0.53567284", "0.5344505", "0.53354657", "0.5303637", "0.5284366", "0.52824575", "0.52807456", "0.525839", "0.52522177", "0.52431047", "0.52199847", "0.5210583", "0.51947844", "0.51...
0.6357265
0
Advance the time reference by the given amount.
def advance_by(self, amount: float): if amount < 0: raise ValueError("cannot retreat time reference: amount {} < 0" .format(amount)) self.__delta += amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def advance(self, amount):\n raise NotImplementedError()", "def advance(self, amount=1):\n self._current += amount\n if self._current - self._updateRate >= self._lastUpdated:\n self.redraw()\n # go to nearest multiple of updateRate less than current\n self._l...
[ "0.74948883", "0.7390248", "0.7373512", "0.72310776", "0.7102698", "0.70964295", "0.688938", "0.6812329", "0.6760769", "0.6728943", "0.64948493", "0.6393024", "0.6281656", "0.6251204", "0.619589", "0.61536574", "0.6126449", "0.60908896", "0.6087889", "0.6015486", "0.60153484"...
0.8371448
0
Advance the time reference so that now is the given timestamp.
def advance_to(self, timestamp: float): now = self.__original_time() if timestamp < now: raise ValueError("cannot retreat time reference: " "target {} < now {}" .format(timestamp, now)) self.__delta = timestamp - now
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_time(self, new_time):\r\n self.when = new_time", "def update_timestamp(self):\n self._timestamp = datetime.datetime.now()", "def advance_time(self, set_to=None, increment_by=None):\n self._time_condition.acquire()\n if set_to is not None:\n self._time = set_to\...
[ "0.6708446", "0.6519798", "0.63856924", "0.63803643", "0.6373168", "0.63460845", "0.6326127", "0.62440217", "0.62124366", "0.6148152", "0.614451", "0.61350346", "0.6095682", "0.604849", "0.6047468", "0.60337085", "0.60114896", "0.6003276", "0.6003276", "0.6003276", "0.6003276...
0.7582537
0
Email the given document to the given email address.
def email_document(document, to, template='django_dms/email.txt', subject=''): # Start a new thread to email the document # This avoids a frozen screen while the email is being sent (particularly if the document is big). t = threading.Thread(target=_email_document, args=[document, to, template, subject]) t.setDaemon(True) t.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMes...
[ "0.7192423", "0.62836236", "0.6040218", "0.6037075", "0.59470624", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.58806413", "0.58806413", "0.58301985", "0.5820901", "0.5814947", "0.5814947", "0.581...
0.7277005
0
Helper function to email document in another thread.
def _email_document(document, to, template='django_dms/email.txt', subject=''): # TODO: A really cool system would delay sending the email for 10 seconds or so, # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS) # Create the message message = EmailMessage(to=to, subject=subject) message.to = to message.subject = subject message.body = render_to_string(template, {'document': document}) message.attach(document.friendly_filename, document.file.read(), document.file_mimetype) # Send the message message.send()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subj...
[ "0.7538517", "0.6329317", "0.5854818", "0.57976854", "0.57880086", "0.57461154", "0.57316077", "0.5677812", "0.56468177", "0.56263226", "0.5602722", "0.5572509", "0.55519354", "0.554117", "0.55247223", "0.5500545", "0.546613", "0.5464859", "0.5459716", "0.5438996", "0.5438996...
0.662225
1
Get the list of url patterns for this view.
def get_urls(self): return patterns('', url(r'^$', self.list, name="%s_document_list" % self.name), url(r'^upload/$', self.new_upload, name="%s_document_upload" % self.name), url(r'^([^\/]+)/download/$', self.download, name="%s_document_download" % self.name), url(r'^([^\/]+)/send/$', self.send, name="%s_document_send" % self.name), url(r'^([^\/]+)/send/ajax/$', self.send_ajax, name="%s_document_send_ajax" % self.name), url(r'^([^\/]+)/detail/$', self.detail, name="%s_document_detail" % self.name), url(r'^([^\/]+)/view/$', self.view, name="%s_document_view" % self.name), url(r'^([^\/]+)/delete/$', self.delete, name="%s_document_delete" % self.name), url(r'^(?P<object_id>([^\/]+))/detail/(?P<direction>up|down|clear)vote/?$', self.vote, name="%s_document_vote" % self.name), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urlpatterns(self) -> list:\n raise NotImplementedError()", "def get_urlpatterns(cls):\n cls.validate_urlpattern_with_options()\n return map(lambda s: s.format(**cls.urlpattern_options), cls.urlpatterns)", "def get_urls(self):\n return patterns('')", "def urls(self):\n p...
[ "0.8027784", "0.79029524", "0.78296137", "0.7634182", "0.69524354", "0.6944128", "0.67643464", "0.6622399", "0.6592495", "0.6579387", "0.653932", "0.64818805", "0.6450613", "0.6444061", "0.6438293", "0.64050204", "0.6334296", "0.6327021", "0.63207394", "0.62634665", "0.623646...
0.6526326
11
Send the specified document to the user's email address (HTML version).
def send(self, request, id, tribe_slug): tribe = get_object_or_404(Tribe, slug=tribe_slug) document = self.get_document(id, tribe_slug) form = self._set_user_email_address(request) email = self._get_user_email_address(request) if form or not email: return render_to_response('django_dms/send.html', locals(), context_instance=RequestContext(request)) # NB: Temporarily disabling actual email sending for development #email_document(document, to=[email], subject='Document: %s' % document.title) print "Sending email to %s" % email # Send a signal to let everyone know about this document interaction document_interaction.send(sender=self, document=document, mode="sent", request=request, recipient=email) return HttpResponseRedirect(reverse('%s_document_list' % self.name, args=(tribe_slug, ) ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMes...
[ "0.7611227", "0.73684156", "0.67148703", "0.6527169", "0.6408137", "0.6307394", "0.6218779", "0.6209792", "0.61815774", "0.6166898", "0.616678", "0.6158147", "0.60088944", "0.5979466", "0.5952957", "0.5952957", "0.59498113", "0.59447414", "0.5928036", "0.59016263", "0.5876797...
0.6786847
2
Send the specified document to the user's email address (AJAX version).
def send_ajax(self, request, id, tribe_slug): document = self.get_document(id, tribe_slug) form = self._set_user_email_address(request) email = self._get_user_email_address(request) if not email and not form: form = EmailForm() if form: content = '<form class="ajax_update_email" action="%s" method="post">' % reverse('%s_document_send' % self.name, args=[getattr(document, self.url_identifier_field)]) content += '%s<input type="submit" value="Send"/></form>' % form['email'] return HttpResponse(content) print "Sending email to %s" % email #email_document(document, to=[email], subject='Document: %s' % document.title) # Send a signal to let everyone know about this document interaction document_interaction.send(sender=self, document=document, mode="sent", request=request, recipient=email) return HttpResponse('Email sent to %s' % email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(self, request, id, tribe_slug):\n tribe = get_object_or_404(Tribe, slug=tribe_slug)\n document = self.get_document(id, tribe_slug)\n \n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if form or not email:\n ...
[ "0.7083903", "0.6592813", "0.6592162", "0.6246286", "0.5818268", "0.58028346", "0.5787325", "0.5658083", "0.56290865", "0.5581146", "0.55060256", "0.5494548", "0.5458515", "0.54515415", "0.544656", "0.5391331", "0.5326866", "0.53013676", "0.52664655", "0.5242208", "0.5226229"...
0.7920855
0
Gets a custom defined or default email address for the current user.
def _get_user_email_address(self, request): return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')", "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def user_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_email\")", ...
[ "0.79681355", "0.77469647", "0.7720694", "0.76566875", "0.7597106", "0.75681674", "0.7504296", "0.74986595", "0.74986595", "0.74986595", "0.74986595", "0.7492597", "0.7372196", "0.7372196", "0.7372196", "0.7372132", "0.73386735", "0.72807336", "0.72577626", "0.7211655", "0.72...
0.793554
1
If a new email address is posted, remember it.
def _set_user_email_address(self, request): if request.method == 'POST': form = EmailForm(request.POST) if form.is_valid(): request.session[SESSION_VAR_EMAIL_ADDRESS] = form.cleaned_data['email'] else: return form
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_duplicate_email(self, email):\r\n request = self.req_factory.post('unused_url', data={\r\n 'new_email': email,\r\n 'password': 'test',\r\n })\r\n request.user = self.user\r\n self.assertFailedRequest(self.run_request(request), 'An account with this e-mail...
[ "0.65422535", "0.64260054", "0.63118654", "0.63064903", "0.60563016", "0.6038557", "0.6014613", "0.5898787", "0.58314365", "0.5781677", "0.577932", "0.5769288", "0.575497", "0.5719079", "0.57139456", "0.5708547", "0.5708547", "0.5700854", "0.5699276", "0.56723505", "0.5661490...
0.6590844
0
Get an item for the template, containing the document.
def _get_list_item(self, document): list_item = Item() list_item.document = document # TODO: call callables? list_item.display_fields = [self._prepare_field(document, field) for field in self.list_display] return list_item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object(self):\n if not self._item:\n self._item = get_object_or_404(Item, pk=self.kwargs['item_id'])\n return self._item", "def getItemTemplate(self):\n self.update()\n if self.resource_item_template is None:\n return None\n\n return ResourceItem(s...
[ "0.6883214", "0.6577675", "0.6520346", "0.6520346", "0.63746023", "0.6368625", "0.6285564", "0.6244785", "0.61841923", "0.61830777", "0.6121958", "0.61106426", "0.60945314", "0.6082757", "0.60224175", "0.6003464", "0.59755844", "0.59741163", "0.59633183", "0.5937414", "0.5898...
0.55808055
54
Get the list of url patterns for this view.
def get_urls(self): return patterns('', #url(r'^$', self.new_upload, name="%s_document_list" % self.name), url(r'^([^\/]+)/edit/$', self.edit, name="%s_document_edit" % self.name), url(r'^([\w\d-]+)/confirm/$', self.confirm, name="%s_document_confirm" % self.name) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urlpatterns(self) -> list:\n raise NotImplementedError()", "def get_urlpatterns(cls):\n cls.validate_urlpattern_with_options()\n return map(lambda s: s.format(**cls.urlpattern_options), cls.urlpatterns)", "def get_urls(self):\n return patterns('')", "def urls(self):\n p...
[ "0.8027784", "0.79029524", "0.78296137", "0.7634182", "0.69524354", "0.6944128", "0.67643464", "0.6622399", "0.6592495", "0.6579387", "0.653932", "0.6526326", "0.64818805", "0.6450613", "0.6444061", "0.6438293", "0.64050204", "0.6334296", "0.6327021", "0.63207394", "0.6263466...
0.6040245
37
Print list of instances with their attached volume id/size to console, ie
def list_ebss_by_instance(): ec2 = u.create_ec2_resource() instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()] sorted_instances = sorted(instances, key=itemgetter(0)) for (seconds, instance) in sorted_instances: volumes = instance.volumes.all() volume_strs = [] for v in volumes: volume_strs.append("%s (%s)"%(v.id, v.size)) print("%s: %s" % (u.get_name(instance.tags), ','.join(volume_strs)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(jso...
[ "0.66387", "0.65122116", "0.64756095", "0.6264545", "0.61685747", "0.61603457", "0.6125444", "0.61109924", "0.6072397", "0.60526633", "0.60198474", "0.59469944", "0.5940197", "0.5938797", "0.59375834", "0.5925745", "0.5900101", "0.5888648", "0.58722466", "0.5867416", "0.58226...
0.6720033
0
Grows EBS volume for given task.
def grow_ebs_for_task(task_fragment, target_size_gb): ec2 = u.create_ec2_resource() client = u.create_ec2_client() # todo: don't crash on missing/duplicate names instances = {u.get_name(i.tags): i for i in ec2.instances.all()} ec2 = u.create_ec2_resource() instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()] sorted_instances = reversed(sorted(instances, key=itemgetter(0))) for (seconds, instance) in sorted_instances: task_name = u.get_name(instance.tags) hours_ago = (time.time()-seconds)/3600 hours_ago+=8 # adjust for time being in UTC if task_fragment in task_name: print("Found instance %s launched %.1f hours ago" %( task_name, hours_ago)) break print(instance.id) volumes = list(instance.volumes.all()) assert len(volumes)==1, "Must have 1 volume" print("Growing %s to %s"%(volumes[0].id, target_size_gb)) response = client.modify_volume( VolumeId=volumes[0].id, Size=target_size_gb, ) assert u.is_good_response(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grow_volume(self, volume, growth, async=False):\n\n assert isinstance(volume, dict), \"volume configuration is invalid, 'dict' type expected\"\n assert volume.get('id'), \"volume.id can't be blank\"\n\n async_result = __node__['bollard'].apply_async('api.postgresql.grow-volume',\n ...
[ "0.64236933", "0.6261913", "0.60468847", "0.587187", "0.5659941", "0.5560449", "0.53718984", "0.534629", "0.5339353", "0.53099704", "0.52602965", "0.5249011", "0.518899", "0.5125479", "0.50864977", "0.5082796", "0.5080378", "0.50407803", "0.50402933", "0.5031618", "0.5022762"...
0.69317895
0
This class tests the PyTorchYolo object detector.
def get_pytorch_yolo(get_default_cifar10_subset): import cv2 import torch from pytorchyolo import models from pytorchyolo.utils.loss import compute_loss from art.estimators.object_detection.pytorch_yolo import PyTorchYolo model_path = "/tmp/PyTorch-YOLOv3/config/yolov3.cfg" weights_path = "/tmp/PyTorch-YOLOv3/weights/yolov3.weights" model = models.load_model(model_path=model_path, weights_path=weights_path) class YoloV3(torch.nn.Module): def __init__(self, model): super().__init__() self.model = model def forward(self, x, targets=None): if self.training: outputs = self.model(x) # loss is averaged over a batch. Thus, for patch generation use batch_size = 1 loss, loss_components = compute_loss(outputs, targets, self.model) loss_components_dict = {"loss_total": loss} return loss_components_dict else: return self.model(x) model = YoloV3(model) object_detector = PyTorchYolo( model=model, input_shape=(3, 416, 416), clip_values=(0, 1), attack_losses=("loss_total",) ) n_test = 10 (_, _), (x_test_cifar10, y_test_cifar10) = get_default_cifar10_subset x_test_cifar10 = x_test_cifar10[0:n_test] x_test = cv2.resize( x_test_cifar10[0].transpose((1, 2, 0)), dsize=(416, 416), interpolation=cv2.INTER_CUBIC ).transpose((2, 0, 1)) x_test = np.expand_dims(x_test, axis=0) x_test = np.repeat(x_test, repeats=2, axis=0) # Create labels result = object_detector.predict(x=x_test) y_test = [ { "boxes": result[0]["boxes"], "labels": result[0]["labels"], "scores": np.ones_like(result[0]["labels"]), }, { "boxes": result[1]["boxes"], "labels": result[1]["labels"], "scores": np.ones_like(result[1]["labels"]), }, ] yield object_detector, x_test, y_test
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detection(input_path, output_path, yolo_model_path):\n detector = VideoObjectDetection()\n # this function sets the model type of the object detection instance you created to the YOLOv3 model\n detector.setModelTypeAsYOLOv3()\n # this function accepts a string that must be the path to the model fil...
[ "0.6627789", "0.6447896", "0.6315877", "0.62284786", "0.6174531", "0.61566085", "0.6096233", "0.60372835", "0.58174103", "0.5799088", "0.5774036", "0.5773982", "0.57679653", "0.56752574", "0.5674558", "0.5668986", "0.5649171", "0.5648845", "0.5647306", "0.5644756", "0.5634993...
0.7094565
0
Splits image into tiles by size of tile. tile_w tile width tile_h tile height
def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int): x_axis = -1 y_axis = -2 arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis] x_ntiles = ( arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1 ) y_ntiles = ( arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1 ) tiles = [] # row for i in range(0, y_ntiles): # height of this tile ver_f = tile_h * i ver_t = ver_f + tile_h # col for j in range(0, x_ntiles): # width of this tile hor_f = tile_w * j hor_t = hor_f + tile_w tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap) tiles.append(tile) tile_shape = [tile_h, tile_w] ntiles = dict(x=x_ntiles, y=y_ntiles) padding = dict(left=0, right=0, top=0, bottom=0) if arr_width % tile_w == 0: padding["right"] = 0 else: padding["right"] = tile_w - (arr_width % tile_w) if arr_height % tile_h == 0: padding["bottom"] = 0 else: padding["bottom"] = tile_h - (arr_height % tile_h) info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding) return tiles, info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles...
[ "0.76203066", "0.71610355", "0.69022644", "0.6677268", "0.6614609", "0.6609202", "0.6596505", "0.6574447", "0.6529179", "0.652334", "0.650743", "0.650743", "0.6493861", "0.6485636", "0.64644873", "0.6450046", "0.644942", "0.6383776", "0.6369156", "0.634634", "0.63084143", "...
0.7902135
0
Splits image into tiles by number of tile. x_ntiles number of tiles horizontally y_ntiles number of tiles vertically
def split_image_into_number_of_tiles( arr: Image, x_ntiles: int, y_ntiles: int, overlap: int ): img_width, img_height = arr.shape[-1], arr.shape[-2] tile_w = img_width // x_ntiles tile_h = img_height // y_ntiles return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles...
[ "0.7743909", "0.7442206", "0.7223278", "0.6875224", "0.6805701", "0.6805701", "0.68043333", "0.6783657", "0.67152864", "0.6648476", "0.6550898", "0.6506808", "0.6499578", "0.644437", "0.64369965", "0.64329946", "0.6417711", "0.6391121", "0.6388546", "0.637921", "0.6316766", ...
0.8323475
0
Draw the contours over a blank array. The function cv2.DrawContours overlays the contours on top of the bitwise array. Which is not ideal if the bitwise array contains some small, noisy contours. Therefore, I created an empty array first and then used this as the base for drawing the contours onto.
def draw_contours(self): contours, hierarchy = cv2.findContours(self.edged, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_L1) edged = cv2.bitwise_not(self.edged) rgb = cv2.cvtColor(edged, cv2.COLOR_GRAY2RGB) temp_array = np.ones([rgb.shape[0], rgb.shape[1], rgb.shape[2]]) contours_ = cv2.drawContours(temp_array, contours, -1, (0, 0, 0), thickness=1) ml_filename = 'MLOutputs/' + str(self.file_name) + 'Clusters' + str(self.clusters) + \ 'FilterSize' + str(self.filter_size) + 'Sigma' + str(self.sigma) + 'UniqueColours' + \ str(self.unique_colours) + ".png" plt.imshow(contours_, cmap="gray") if not os.path.isfile(ml_filename): imsave(ml_filename, contours_, cmap="gray")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_contours(self, image_path):\n img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)\n contoured = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n unique, counts = np.unique(img, return_counts=True)\n for uni in unique:\n if uni == 0:\n continue\n...
[ "0.62755567", "0.62009114", "0.60225683", "0.5922091", "0.5880212", "0.58467317", "0.5801622", "0.5658485", "0.56459415", "0.56298447", "0.5628904", "0.5601753", "0.5587474", "0.55055207", "0.5471264", "0.5443541", "0.54303265", "0.54265594", "0.542418", "0.54153204", "0.5408...
0.5654723
8
Return True if object is an instance that inherited from specified\ class, otherwise False python3 c 'print(__import__("my_module").my_function.__doc__)' python3 c 'print(__import__("my_module").MyClass.my_function.__doc__)'
def inherits_from(obj, a_class): if issubclass(type(obj), a_class) and not type(obj) == a_class: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isclassinstance(object):\n if not hasattr(object, \"__class__\"):\n return False\n if isbuiltin(object.__class__):\n return False\n return True", "def isclass(object):\n if not inspect.isclass(object):\n return False\n if isbuiltin(object):\n return False\n retur...
[ "0.6786016", "0.6415582", "0.6202909", "0.6114083", "0.60608363", "0.60413224", "0.6035273", "0.60166055", "0.59785694", "0.59465903", "0.5944888", "0.5933893", "0.59169173", "0.5910051", "0.59036976", "0.5892804", "0.5881093", "0.5877024", "0.5872548", "0.58636963", "0.58568...
0.59816
8
Compile the .qrc (Qt Designer resource files) into .py files.
def compile_resources(resourcename="images", location="."): res_count_ok = 0 res_count_ko = 0 for qrc_file in os.listdir(location): # Loop through directory if qrc_file.endswith(".qrc"): # We have a candidate file print(f"---> Found {qrc_file}") # get the filename without extension base_filename, _ = os.path.splitext(os.path.basename(qrc_file)) # Make the target name target_filename = f"{base_filename}_rc.py" # Run! result = subprocess.run(["pyrcc5", "-o", target_filename, f"{location}/{qrc_file}"], capture_output=True) if result.returncode == 0: print(f"[v] Resource compiled to {target_filename}") res_count_ok += 1 else: print(f"[e] An error occured {result}") res_count_ko += 1 return res_count_ok, res_count_ko
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_qrc(path, name=\"resources\", prefix=\"icons\"):\n qrc = '<RCC>\\n\\t<qresource prefix=\"{}\">\\n'.format(prefix)\n for each in sorted(os.listdir(path)):\n qrc += \"\\t\\t<file>{0}</file>\\n\".format(each)\n qrc += \"\\t</qresource>\\n</RCC>\\n\"\n\n qrc_file = os.path.join(path, na...
[ "0.67200714", "0.65681183", "0.6396254", "0.6029277", "0.5712012", "0.5677451", "0.56494296", "0.5627389", "0.5508585", "0.5422793", "0.5350092", "0.530345", "0.52731526", "0.5244704", "0.5227191", "0.5177347", "0.5123454", "0.5108174", "0.5087578", "0.50874686", "0.5061226",...
0.6065464
3
Generates an array of ppxf_util.gaussian emission lines to be used as gas templates in PPXF. Generally, these templates represent the instrumental line spread function (LSF) at the set of wavelengths of each emission line. In this case, pPXF will return the intrinsic (i.e. astrophysical) dispersion of the gas lines. Alternatively, one can input FWHM_gal=0, in which case the emission lines are deltafunctions and pPXF will return a dispersion which includes both the intrumental and the intrinsic disperson. Additional lines can be easily added by editing the code of this procedure, which is meant as a template to be modified by the users where needed. For accuracy the ppxf_util.gaussians are integrated over the pixels boundaries. This can be changed by setting `pixel`=False. The [OI], [OIII] and [NII] doublets are fixed at theoretical flux ratio~3. The [OII] and [SII] doublets can be restricted to physical range of ratios. The Balmet Series can be fixed to the theoretically predicted decrement.
def emission_lines(logLam_temp, lamRange_gal, FWHM_gal, pixel=True, tie_balmer=False, limit_doublets=False, vacuum=False): if tie_balmer: # Balmer decrement for Case B recombination (T=1e4 K, ne=100 cm^-3) # Table 4.4 of Dopita & Sutherland 2003 https://www.amazon.com/dp/3540433627 # Balmer: Htheta Heta Hzeta Heps Hdelta Hgamma Hbeta Halpha wave = np.array([3797.90, 3835.39, 3889.05, 3970.07, 4101.76, 4340.47, 4861.33, 6562.80]) # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) ratios = np.array([0.0530, 0.0731, 0.105, 0.159, 0.259, 0.468, 1, 2.86]) ratios *= wave[-2]/wave # Account for varying pixel size in Angstrom emission_lines = gauss @ ratios line_names = ['Balmer'] w = (wave > lamRange_gal[0]) & (wave < lamRange_gal[1]) line_wave = np.mean(wave[w]) if np.any(w) else np.mean(wave) else: # Use fewer lines here, as the weak ones are difficult to measure # Balmer: Hdelta Hgamma Hbeta Halpha line_wave = [4101.76, 4340.47, 4861.33, 6562.80] # air wavelengths if vacuum: line_wave = ppxf_util.air_to_vac(line_wave) line_names = ['Hdelta', 'Hgamma', 'Hbeta', 'Halpha'] emission_lines = ppxf_util.gaussian(logLam_temp, line_wave, FWHM_gal, pixel) if limit_doublets: # The line ratio of this doublet lam3729/lam3726 is constrained by # atomic physics to lie in the range 0.28--1.47 (e.g. fig.5.8 of # Osterbrock & Ferland 2005 https://www.amazon.co.uk/dp/1891389343/). # We model this doublet as a linear combination of two doublets with the # maximum and minimum ratios, to limit the ratio to the desired range. 
# -----[OII]----- wave = [3726.03, 3728.82] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) names = ['[OII]3726_d1', '[OII]3726_d2'] gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) doublets = gauss @ [[1, 1], [0.28, 1.47]] # produces *two* doublets emission_lines = np.column_stack([emission_lines, doublets]) line_names = np.append(line_names, names) line_wave = np.append(line_wave, wave) # The line ratio of this doublet lam6716/lam6731 is constrained by # atomic physics to lie in the range 0.44--1.43 (e.g. fig.5.8 of # Osterbrock & Ferland 2005 https://www.amazon.co.uk/dp/1891389343/). # We model this doublet as a linear combination of two doublets with the # maximum and minimum ratios, to limit the ratio to the desired range. # -----[SII]----- wave = [6716.47, 6730.85] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) names = ['[SII]6731_d1', '[SII]6731_d2'] gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) doublets = gauss @ [[0.44, 1.43], [1, 1]] # produces *two* doublets emission_lines = np.column_stack([emission_lines, doublets]) line_names = np.append(line_names, names) line_wave = np.append(line_wave, wave) else: # Here the doublets are free to have any ratio # -----[OII]----- -----[SII]----- wave = [3726.03, 3728.82, 6716.47, 6730.85] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) names = ['[OII]3726', '[OII]3729', '[SII]6716', '[SII]6731'] gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) emission_lines = np.column_stack([emission_lines, gauss]) line_names = np.append(line_names, names) line_wave = np.append(line_wave, wave) # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[OIII]----- wave = [4958.92, 5006.84] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [0.33, 1] emission_lines = np.column_stack([emission_lines, doublet]) line_names = 
np.append(line_names, '[OIII]5007_d') # single template for this doublet line_wave = np.append(line_wave, wave[1]) # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[OI]----- wave = [6300.30, 6363.67] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [1, 0.33] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[OI]6300_d') # single template for this doublet line_wave = np.append(line_wave, wave[0]) # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[NII]----- wave = [6548.03, 6583.41] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [0.33, 1] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[NII]6583_d') # single template for this doublet line_wave = np.append(line_wave, wave[1]) #added by anja to ppxf_util.emission_lines version # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[NI]----- wave = [5197.90, 5200.39] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [1, 0.7] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[NI]5200_d') # single template for this doublet line_wave = np.append(line_wave, wave[1]) #---------------------- # Only include lines falling within the estimated fitted wavelength range. # w = (line_wave > lamRange_gal[0]) & (line_wave < lamRange_gal[1]) emission_lines = emission_lines[:, w] line_names = line_names[w] line_wave = line_wave[w] print('Emission lines included in gas templates:') print(line_names) return emission_lines, line_names, line_wave
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Generate_BG_Template(outputSize=300, angularSize = 10, fileOut = 'BGRateMap.pickle' ):\r\n template = np.zeros((outputSize,outputSize))\r\n ppd=float(outputSize)/float(angularSize) # pixels per deg\r\n \r\n events110 = ParseFermi.Import_File('photons.txt', energyRange = (120000,140000),lonRange=(-...
[ "0.5699116", "0.5599356", "0.5230809", "0.5207666", "0.5170641", "0.5158478", "0.51580274", "0.5144128", "0.5076723", "0.5075624", "0.50331193", "0.50331193", "0.5024959", "0.49573076", "0.49408728", "0.49217612", "0.48975858", "0.48955354", "0.48943654", "0.48930743", "0.487...
0.58730155
0
Defining prior distributions for the model.
def set_priors(parnames, limits, linenames, vsyst, nssps=1): priors = {} for parname in parnames: name = parname.split("_")[0] if name in limits: #all the CvD ssp parameters vmin, vmax = limits[name] # print(parname,vmin,vmax) delta = vmax - vmin priors[parname] = stats.uniform(loc=vmin, scale=delta) elif parname in vsyst: priors[parname] = stats.norm(loc=vsyst[parname], scale=500) elif parname == "eta": #what does eta do? priors["eta"] = stats.uniform(loc=1., scale=19)#uniform distribution in range [1,19] elif parname == "nu": #what does nu do? priors["nu"] = stats.uniform(loc=2, scale=20)#uniform distribution in range [2,20] elif parname == "sigma": priors["sigma"] = stats.uniform(loc=50, scale=300)#obtains the uniform distribution on [loc, loc + scale]. i.e. uniform in range [50,300] elif parname == "sigma_gas": priors[parname] = stats.uniform(loc=50, scale=100)#uniform between [50,100]km/s elif name == "w": priors[parname] = stats.uniform(loc=0, scale=1)#weights uniform between 0 and 1 elif name in linenames: # priors[parname] = stats.expon(loc=0, scale=0.5)#favors low values>~0; make even stronger by decreasing scale. priors[parname] = stats.expon(loc=0, scale=0.2)#favors low values>~0; make even stronger by decreasing scale. elif name in ["pred", "pblue"]: porder = int(parname.split("_")[1]) if porder == 0: mu, sd = 1 / nssps, 1 a, b = (0 - mu) / sd, (np.infty - mu) / sd priors[parname] = stats.truncnorm(a, b, mu, sd) else: priors[parname] = stats.norm(0, 0.05) else: print(f"parameter without prior: {parname}") return priors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def prior_distribution(self):\n out = self.model.forward(self.inducing_points)\n return MultivariateNormal(out.mean, out.lazy_covariance_matrix.evaluate_kernel())", "def get_prior(self):\n assert se...
[ "0.7287337", "0.7219811", "0.698138", "0.69521374", "0.6878878", "0.6738291", "0.65607095", "0.65233296", "0.6507224", "0.64965516", "0.6484119", "0.64804757", "0.64787775", "0.6447634", "0.6433855", "0.6417294", "0.6408151", "0.64017296", "0.6352515", "0.6351014", "0.6346401...
0.0
-1
Calculates the probability of a model.
def log_probability(theta):
    """Return the log posterior (log prior + log likelihood) for theta.

    Relies on the module-level ``priors`` dict and the ``logp`` likelihood
    callable (which exposes a ``parnames`` attribute). Returns -inf when
    either the prior or the likelihood is non-finite.
    """
    global priors
    global logp
    prior_sum = np.sum(
        [priors[name].logpdf(value)
         for name, value in zip(logp.parnames, theta)])
    if not np.isfinite(prior_sum) or np.isnan(prior_sum):
        return -np.inf
    likelihood = logp(theta)
    if not np.isfinite(likelihood):
        return -np.inf
    return prior_sum + likelihood
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_model_prob(self, per_list_logodds):\n with tf.compat.v1.name_scope(name='compute_model_prob'):\n return tf.stop_gradient(\n tf.exp(-self._alpha *\n (per_list_logodds -\n tf.reduce_min(per_list_logodds, axis=2, keepdims=True))))", "def calculate_proba...
[ "0.7188488", "0.7145453", "0.6747787", "0.67278534", "0.65877354", "0.6577112", "0.6537247", "0.6531125", "0.6514498", "0.65023434", "0.64306843", "0.63726646", "0.63619375", "0.63538843", "0.62945044", "0.6287746", "0.62856567", "0.62772304", "0.62686586", "0.62612885", "0.6...
0.0
-1
Combine SSP traces to have mass/luminosity weighted properties
def weighted_traces(parnames, trace, nssps):
    """Combine SSP traces into mass/luminosity-weighted properties.

    For each parameter, averages the per-component traces "<param>_<i>"
    using the weight traces "w_<i>", and returns the averaged columns
    stacked horizontally as "<param>_weighted".
    """
    weights = np.array(
        [trace["w_{}".format(i + 1)].data for i in range(nssps)])
    columns = []
    for parname in parnames:
        component_data = np.array(
            [trace["{}_{}".format(parname, i + 1)].data
             for i in range(nssps)])
        averaged = np.average(component_data, weights=weights, axis=0)
        columns.append(
            Table([averaged], names=["{}_weighted".format(parname)]))
    return hstack(columns)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_combined_variation(nums, SSC, band, rms):\n\n def get_spectra(nums, SSC, band, rms):\n spectrum = spectra[str(SSC['no'])][band]\n frequency = spectrum['frequency'].to(u.GHz)\n intensity = spectrum['spectrum'].to(u.K)\n # shift spectrum to rest frequency\n velshift =...
[ "0.53677773", "0.52855563", "0.52082103", "0.5192049", "0.50655925", "0.50220954", "0.5019231", "0.4980716", "0.496982", "0.49639016", "0.496379", "0.49591964", "0.49528778", "0.49409777", "0.4926693", "0.49150053", "0.48961598", "0.4841809", "0.48396593", "0.48142034", "0.48...
0.5468182
1
Call function to transform and format common fields in results.
def __call__(self, results):
    """Convert common result fields into tensors wrapped in DataContainer.

    Handles, when present: 'img' (HWC -> CHW, stacked), 'gt_semantic_seg'
    (cast to int64 and stacked), 'gt_masks' and 'gt_labels'.
    """
    if 'img' in results:
        image = results['img']
        if len(image.shape) < 3:
            # promote grayscale HxW to HxWx1 so the transpose below is valid
            image = np.expand_dims(image, -1)
        image = np.ascontiguousarray(image.transpose(2, 0, 1))
        results['img'] = DC(to_tensor(image), stack=True)
    if 'gt_semantic_seg' in results:
        # convert to long
        seg = results['gt_semantic_seg'][None, ...].astype(np.int64)
        results['gt_semantic_seg'] = DC(to_tensor(seg), stack=True)
    if 'gt_masks' in results:
        results['gt_masks'] = DC(to_tensor(results['gt_masks']))
    if 'gt_labels' in results:
        results['gt_labels'] = DC(to_tensor(results['gt_labels']))
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform():", "def transform():\n pass", "def transform(self):", "def _apply_transform(self):\n pass", "def _output_format(cls, func, override=None):\n @wraps(func)\n def _format_wrapper(self, *args, **kwargs):\n json_response, data_key, meta_data_key = func(self...
[ "0.6625306", "0.6302095", "0.6020546", "0.59190804", "0.5870707", "0.5793297", "0.573849", "0.57165796", "0.56867325", "0.56854814", "0.56783605", "0.5602666", "0.55980164", "0.5573735", "0.55417204", "0.55297184", "0.5522978", "0.5522028", "0.54894423", "0.5454132", "0.54165...
0.0
-1
update learning rate of optimizers
def updatelearningrate(self, epoch):
    """Refresh self.lr from the schedule and apply it to the optimizer.

    NOTE(review): both branches below update ``self.optimzer`` with the
    same rate; the list case presumably intended per-module learning
    rates — confirm upstream before changing.
    """
    self.lr = getlearningrate(epoch=epoch, opt=self.opt)
    if isinstance(self.model, list):
        count = 0
        for param_group in self.optimzer.param_groups:
            param_group['lr'] = self.lr
            count += 1
    else:
        for param_group in self.optimzer.param_groups:
            param_group['lr'] = self.lr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_learning_rate(self):\r\n\r\n self.n_steps += 1\r\n lr = self.factor * self._get_lr_scale()\r\n for param_group in self._optimizer.param_groups:\r\n param_group['lr'] = lr", "def update_learning_rate(self):\n self.scheduler.step()\n lr = self.optimizer.par...
[ "0.82430375", "0.8091104", "0.8081948", "0.8057003", "0.7882125", "0.7729992", "0.7728464", "0.7728464", "0.77180463", "0.7680515", "0.7672647", "0.7670474", "0.76550764", "0.76442766", "0.762199", "0.76109266", "0.7595715", "0.758477", "0.7532394", "0.7522077", "0.75184435",...
0.81921303
1
construct any person or destroyable object
def __init__(
        self,
        id: int,
        coordinates: Tuple[int, int],
        name: str,
        health: 'Health',
        ai: 'Ai',
        parts: Dict[str, 'Part'],
        actions: Set['Action'],
        skills: Set['Skill'] = None,
        effects: Effects = None,
        team: 'Team' = None,
) -> None:
    """Construct any person or destroyable object."""
    self.id = id
    self.position = coordinates
    self.name = name
    self.health = health
    self.ai = ai
    self.parts = parts
    self.actions = actions
    # fall back to fresh containers so instances never share mutable state
    self.skills = skills or set()
    self.effects = effects or Effects()
    self.team = team
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_persona(x):\n return Persona(x)", "def make_object():\n return object()", "def create_person(person: Person = Body(...)):\n return person", "def new_object(self):\r\n\t\tpass", "def new(self, obj):\n pass", "def instantiate(obj):\n return obj() if isinstance(obj, type) el...
[ "0.726856", "0.7046766", "0.6489531", "0.6340825", "0.6317375", "0.62946725", "0.6185293", "0.61081696", "0.6088009", "0.6050287", "0.59465456", "0.59465456", "0.59365165", "0.5905628", "0.5864767", "0.58611476", "0.585532", "0.5852625", "0.58511806", "0.58508277", "0.5850827...
0.0
-1
choose targets and actions
def do_action(self, ally: Set['Entity'], enemy: Set['Entity']) -> None:
    """Tick active effects, then choose and perform one action this turn."""
    self.effects.update()
    actions, allies, enemies = self.get_actions(ally, enemy)
    action, target = self.ai.choose_action(self, actions, allies, enemies)
    if not action:
        # nothing available: idle on self
        action = pass_action
        target = self
    action.do(self, target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(self, targets):", "def choose_action(self):\r\n pass", "def Action(self):\n for target in self._targets:\n target.Action()", "def choose_action(self, board, possible_actions):\r\n pass", "def target_act(self, obs_all_agents, noise=0.0):\n target_actions = ...
[ "0.6905978", "0.66720754", "0.64456934", "0.64400035", "0.6392093", "0.6392093", "0.63541424", "0.63423127", "0.6292723", "0.6272857", "0.6237775", "0.6205516", "0.61756647", "0.6061701", "0.6053934", "0.6031384", "0.6015014", "0.59976435", "0.59927475", "0.596754", "0.591768...
0.0
-1
get list of available actions
def get_actions(self, ally: Set['Entity'], enemy: Set['Entity']) -> Tuple[
        Set['Action'], Set['Entity'], Set['Entity']]:
    """Return actions not on cooldown plus the candidate target sets."""
    active_effect_names = [effect.name for effect in self.effects.effects]
    available_actions = set()
    for action in self.actions:
        # an action is blocked while its cooldown effect is still active
        if action.cool_down.name not in active_effect_names:
            available_actions.add(action)
    return available_actions, ally, enemy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_available_actions(self):\n return self.actions", "def get_actions(self):\n return []", "def get_list_of_actions(self):\n return self.actions", "def actions(self):\n return self._action_list", "def get_available_actions(self, state):\n pass", "def _get_actions(self):...
[ "0.88604504", "0.8727885", "0.8649391", "0.8512479", "0.8356988", "0.8249469", "0.8249469", "0.8249469", "0.8200832", "0.80950415", "0.80412275", "0.7978333", "0.7978333", "0.7912648", "0.7903887", "0.7893374", "0.78658706", "0.77716225", "0.77087396", "0.7706551", "0.7704463...
0.0
-1
Measure distance between two entities
def measure_distance(actor: 'Entity', target: 'Entity') -> int:
    """Return 0 when actor and target are the same object, else 1."""
    return 0 if actor is target else 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance(self, other):\n ...", "def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))", "def _object_distance(self, object1, object2):\n return np.linalg.norm(np.array(object1) - np.array(object2))", "def __distance_to(self, other: Any) -> float:\n return...
[ "0.7556397", "0.74695444", "0.74531186", "0.7383299", "0.7376668", "0.7279911", "0.72782665", "0.72525454", "0.72392297", "0.72290546", "0.72261333", "0.7214202", "0.7161101", "0.7153692", "0.7150545", "0.7123836", "0.7117742", "0.71136373", "0.71060133", "0.7101069", "0.7084...
0.0
-1
Return apitools message object for give message name.
def GetApiMessage(message_name):
    """Return the apitools message class for the given message name."""
    module = apis.GetMessagesModule(_BQ_API, _BQ_API_VERSION)
    return getattr(module, message_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def msg(name):\n msg = Message.ByKeys(name)\n if msg is not None:\n txt = msg.message_ml\n if msg_is_ignored(name):\n txt = IGNORE_PREFIX + txt\n else:\n misc.cdblogv(misc.kLogErr, 0,\n \"bomcreator: could not find message '%s'\" % name)\n txt = n...
[ "0.69583714", "0.65979904", "0.64236474", "0.62720525", "0.62713873", "0.625511", "0.6236496", "0.6140541", "0.61393994", "0.6108671", "0.6034471", "0.6006937", "0.5988172", "0.5965715", "0.5953879", "0.5941471", "0.5921525", "0.5919041", "0.5918195", "0.590729", "0.5900649",...
0.80781776
0
Builds a bigquery AccessValueListEntry array from input file. Expects YAML or JSON formatted file.
def PermissionsFileProcessor(input_file):
    """Build a bigquery AccessValueListEntry array from input file.

    Expects a YAML or JSON formatted file containing an 'access' list.

    Raises:
        PermissionsFileError: if the file defines no access list, contains
            an invalid permission definition, or fails to parse.
    """
    access_value_msg = GetApiMessage('Dataset').AccessValueListEntry
    try:
        parsed = yaml.load(input_file[0])
        access_list = parsed.get('access', None)
        if not access_list or not isinstance(access_list, list):
            raise PermissionsFileError(
                'Error parsing permissions file: no access list defined in file')
        permissions_array = []
        for access_yaml in access_list:
            permission = encoding.PyValueToMessage(access_value_msg,
                                                   access_yaml)
            if not _ValidatePermission(permission):
                raise PermissionsFileError(('Error parsing permissions file:'
                                            ' invalid permission definition'
                                            ' [{}]'.format(permission)))
            permissions_array.append(permission)
        return sorted(permissions_array, key=lambda x: x.role)
    except yaml.YAMLParseError as ype:
        raise PermissionsFileError('Error parsing permissions file [{}]'.format(
            ype))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_from_file(self, file):\n self.value = []\n with open(file, \"r\") as f:\n fl = f.readlines()\n\n for l in fl:\n self.value.append([int(x) for x in l.split()])", "def load_vals(txtfile):\n import ast\n \n data = []\n li = load_help(txtfile)\n ...
[ "0.56744987", "0.5283599", "0.5243404", "0.5234778", "0.5156196", "0.51066506", "0.5005265", "0.5005265", "0.49895862", "0.4981226", "0.49759138", "0.49112916", "0.49017558", "0.48792005", "0.48355103", "0.48244855", "0.48244855", "0.48212677", "0.4802652", "0.47956467", "0.4...
0.5508567
1
Convert commandline duration into epoch timeoffset (in ms).
def ProcessTableExpiration(expire_duration):
    """Convert a command-line duration into an epoch timestamp in ms."""
    expiry = times.GetDateTimePlusDuration(datetime.datetime.now(),
                                           expire_duration)
    # mktime yields seconds; the API expects milliseconds
    return int(time.mktime(expiry.timetuple())) * 1000
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_epoch_duration(self):\n\n now = time.time()\n epoch_duration = datetime.datetime.fromtimestamp(now - self.prev_time).strftime(\"%M:%S.%f\")[:-4]\n self.prev_time = now\n return epoch_duration", "def ms2pts(ms, dt):\n return int(ms/dt)", "def np_dt_epoch_msec(value):\n ...
[ "0.5740035", "0.56960464", "0.55968964", "0.5498718", "0.54860854", "0.5484328", "0.5429148", "0.54070693", "0.53953534", "0.53865397", "0.53487533", "0.5276387", "0.52748066", "0.52547956", "0.52436954", "0.52381575", "0.5197884", "0.5156624", "0.51419455", "0.5118423", "0.5...
0.0
-1
Convert Input JSON file into TableSchema message.
def BqTableSchemaFileProcessor(file_arg):
    """Convert an input JSON/YAML file into a TableSchema message.

    Raises:
        SchemaFileError: on parse errors or invalid field definitions.
    """
    table_schema_type = GetApiMessage('TableSchema')
    schema_field_type = GetApiMessage('TableFieldSchema')
    try:
        schema_json = yaml.load(file_arg)
        schema_json = schema_json.get('schema', None)
        if not schema_json or not isinstance(schema_json, list):
            raise SchemaFileError(
                'Error parsing schema file: no schema field list defined in file')
        all_fields = [
            schema_field_type(name=field['name'], type=field['type'],
                              mode=field.get('mode', 'NULLABLE'))
            for field in schema_json
        ]
        return table_schema_type(
            fields=sorted(all_fields, key=lambda x: x.name))
    except yaml.YAMLParseError as ype:
        raise SchemaFileError('Error parsing schema file [{}]'.format(ype))
    except (AttributeError, KeyError) as e:
        raise SchemaFileError(
            'Error parsing schema file, invalid field definition [{}]'.format(e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReadSchemaFile(schema_file, bigquery_messages):\n\n if os.path.exists(schema_file):\n with open(schema_file, mode='r') as f:\n try:\n def UpperOrNone(string):\n return string and string.upper()\n field_schemas = [\n bigquery_messages.TableFieldSchema(\n ...
[ "0.6506698", "0.6255999", "0.6189637", "0.59361964", "0.5880678", "0.5861754", "0.5762274", "0.56616706", "0.56331164", "0.5612857", "0.55776805", "0.5533528", "0.55065984", "0.55045223", "0.548444", "0.547434", "0.54314774", "0.53971493", "0.5346988", "0.5327368", "0.5307453...
0.60093594
3
Convert Input JSON file into TableSchema message.
def BqTableDataFileProcessor(file_arg):
    """Convert an input JSON/YAML data file into insert-all row messages.

    Raises:
        TableDataFileError: if no records are defined or parsing fails.
    """
    data_insert_request_type = GetApiMessage('TableDataInsertAllRequest')
    insert_row_type = data_insert_request_type.RowsValueListEntry
    data_row_type = GetApiMessage('JsonObject')
    try:
        records = yaml.load(file_arg)
        if not records or not isinstance(records, list):
            raise TableDataFileError(
                'Error parsing data file: no data records defined in file')
        return [
            insert_row_type(json=encoding.DictToMessage(record, data_row_type))
            for record in records
        ]
    except yaml.YAMLParseError as ype:
        raise TableDataFileError('Error parsing data file [{}]'.format(ype))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReadSchemaFile(schema_file, bigquery_messages):\n\n if os.path.exists(schema_file):\n with open(schema_file, mode='r') as f:\n try:\n def UpperOrNone(string):\n return string and string.upper()\n field_schemas = [\n bigquery_messages.TableFieldSchema(\n ...
[ "0.6506698", "0.6255999", "0.6189637", "0.60093594", "0.59361964", "0.5880678", "0.5861754", "0.5762274", "0.56616706", "0.56331164", "0.5612857", "0.55776805", "0.5533528", "0.55065984", "0.55045223", "0.548444", "0.547434", "0.54314774", "0.5346988", "0.5327368", "0.5307453...
0.53971493
18
Set projectId value for a BigQueryXXXRequests.
def SetProjectId(ref, args, request):
    """Set the projectId value on a BigQueryXXXRequest."""
    del ref  # unused
    # prefer the explicit flag, otherwise fall back to the core property
    project = args.project or properties.VALUES.core.project.Get(required=True)
    project_ref = resources.REGISTRY.Parse(project,
                                           collection='bigquery.projects')
    request.projectId = project_ref.Name()
    return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_project_quotas(self, project_id, request_model, extra_headers=None,\n use_auth=True, user_name=None):\n resp = self.client.post(\n 'project-quotas/' + project_id,\n request_model=request_model,\n response_model_type=quota_models.ProjectQuota...
[ "0.61926836", "0.6037495", "0.5940458", "0.58782727", "0.58782727", "0.5392154", "0.53831047", "0.5383003", "0.5351515", "0.5217758", "0.52084017", "0.51797605", "0.51797605", "0.51708114", "0.5099718", "0.5095747", "0.5076841", "0.50675255", "0.50642264", "0.5054526", "0.504...
0.7350548
0
Ensure that view parameters are set properly tables create request.
def SetViewParameters(ref, args, request):
    """Clear the view on a tables create request when --view is not given."""
    del ref  # unused
    if not args.view:
        request.table.view = None
    return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create(self, tables, views, schema_name, config):\n if not isinstance(tables, dict):\n return False # Raise Exception That Tables Are In A Wrong Format???!!!\n success = True\n if schema_name is not None:\n self._create_schema(schema_name)\n for table_name_in...
[ "0.63214684", "0.6122003", "0.60363513", "0.59719974", "0.583535", "0.5826202", "0.5768594", "0.57384115", "0.55392927", "0.55050695", "0.54572743", "0.54387593", "0.5411729", "0.53755325", "0.53628856", "0.5360331", "0.5358349", "0.53566074", "0.53360575", "0.5325151", "0.53...
0.65944326
0
Process the ifexists flag on datasets create.
def ProcessDatasetOverwrite(ref, args, request):
    """Delete an existing dataset first when --overwrite was requested."""
    del ref
    dataset_id = request.dataset.datasetReference.datasetId
    project_id = request.projectId
    if args.overwrite and _DatasetExists(dataset_id, project_id):
        _TryDeleteDataset(dataset_id, project_id)
    return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isExist(data):\n return True/False", "def is_version_data_existed(self):\n # if exists, skip\n # return \n\n return True", "def exist(self):", "def run(**kwargs):\n del kwargs # Unused args\n if os.path.exists(DATASET_PATH):\n LOGGER.info('... Dataset already exists....
[ "0.61832976", "0.5865549", "0.58427125", "0.5761497", "0.57018334", "0.5658679", "0.55542874", "0.5486935", "0.5465586", "0.54652494", "0.54532725", "0.5452605", "0.5446348", "0.5409404", "0.5387412", "0.5352073", "0.53434783", "0.5338109", "0.53380615", "0.53380615", "0.5296...
0.5203899
32
Process the overwrite flag on tables create.
def ProcessTableOverwrite(ref, args, request):
    """Delete an existing table first when --overwrite was requested."""
    dataset_id = ref.datasetId
    table_id = ref.Name()
    project_id = ref.projectId
    if args.overwrite and _TableExists(dataset_id, table_id, project_id):
        _TryDeleteTable(dataset_id, table_id, project_id)
    return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n...
[ "0.6433206", "0.6371944", "0.6202041", "0.61849165", "0.60638314", "0.5852797", "0.580371", "0.5792183", "0.5743237", "0.5713558", "0.5528236", "0.5432233", "0.54164076", "0.5352115", "0.5333362", "0.5304936", "0.52447927", "0.5226724", "0.52159977", "0.52028906", "0.5177918"...
0.6969729
0
Process the overwrite flag on tables copy.
def ProcessTableCopyOverwrite(ref, args, request):
    """Set WRITE_TRUNCATE on a table copy job when --overwrite is given."""
    del ref  # Unused
    if args.overwrite:
        # truncate the destination before writing instead of appending
        request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'
    return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ProcessTableOverwrite(ref, args, request):\n dataset_id = ref.datasetId\n table_id = ref.Name()\n project_id = ref.projectId\n\n if args.overwrite:\n if _TableExists(dataset_id, table_id, project_id):\n _TryDeleteTable(dataset_id, table_id, project_id)\n\n return request", "def process_override...
[ "0.67875683", "0.5678444", "0.5599842", "0.55489415", "0.53554213", "0.52686346", "0.52098215", "0.5204137", "0.51888007", "0.51499987", "0.5148369", "0.5104553", "0.5098129", "0.5079654", "0.50565994", "0.50067544", "0.49729812", "0.4921561", "0.49208447", "0.49190444", "0.4...
0.8015918
0
Build JobConfigurationTableCopy from request resource args.
def ProcessTableCopyConfiguration(ref, args, request):
    """Build JobConfigurationTableCopy from request resource args."""
    del ref  # Unused
    source_ref = args.CONCEPTS.source.Parse()
    destination_ref = args.CONCEPTS.destination.Parse()
    prefix = 'job.configuration.copy.'
    field_values = (
        (prefix + 'destinationTable.datasetId',
         destination_ref.Parent().Name()),
        (prefix + 'destinationTable.projectId', destination_ref.projectId),
        (prefix + 'destinationTable.tableId', destination_ref.Name()),
        (prefix + 'sourceTable.datasetId', source_ref.Parent().Name()),
        (prefix + 'sourceTable.projectId', source_ref.projectId),
        (prefix + 'sourceTable.tableId', source_ref.Name()),
    )
    for field_path, value in field_values:
        arg_utils.SetFieldInMessage(request, field_path, value)
    return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetTableCopyResourceArgs():\n table_spec_data = yaml_data.ResourceYAMLData.FromPath('bq.table')\n arg_specs = [\n resource_args.GetResourcePresentationSpec(\n verb='to copy from', name='source', required=True, prefixes=True,\n attribute_overrides={'table': 'source'}, positional=False,\...
[ "0.6806518", "0.51471066", "0.5135233", "0.5034788", "0.4957393", "0.49131292", "0.483293", "0.4809419", "0.47891185", "0.47865072", "0.4757659", "0.46871853", "0.46683812", "0.46534342", "0.46357578", "0.46097738", "0.45914286", "0.45865327", "0.457555", "0.45609608", "0.455...
0.73063713
0
Process schema Updates (additions/mode changes) for the request. Retrieves the current table schema for ref and attempts to merge in the schema provided in the requests. This is necessary since the API backend does not handle PATCH semantics for schema updates (e.g. process the deltas) so we must always send the fully updated schema in the requests.
def ProcessSchemaUpdate(ref, args, request):
    """Merge schema additions/mode relaxations into the full table schema.

    The API backend does not handle PATCH semantics for schema updates
    (i.e. it does not process deltas), so the fully merged schema must be
    sent with the request.
    """
    table = request.table
    relaxed_columns = args.relax_columns
    if not table.schema and not relaxed_columns:
        # not updating the schema; nothing to merge
        return request
    original_schema = _TryGetCurrentSchema(ref.Parent().Name(), ref.Name(),
                                           ref.projectId)
    updated_fields = _GetUpdatedSchema(original_schema, table.schema,
                                       relaxed_columns)
    table_schema_type = GetApiMessage('TableSchema')
    request.table.schema = table_schema_type(fields=updated_fields)
    return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_schema_updates(self):\n data = self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/schema-update\" % (self.project_key, self.recipe_name))\n return RequiredSchemaUpdates(self, data)", "async def upgradeSchema(self) -> None:", "def merge_schema_entry(\n se...
[ "0.65370333", "0.61016726", "0.59257406", "0.59054583", "0.57786834", "0.5593872", "0.5549331", "0.5481114", "0.5472658", "0.54690826", "0.5401272", "0.5361555", "0.53470576", "0.5315268", "0.5254957", "0.52487254", "0.5139043", "0.50770354", "0.5074396", "0.5061819", "0.5033...
0.8179473
0
Try to retrieve the current BigQuery TableSchema for a table_ref. Tries to fetch the schema of an existing table. Raises SchemaUpdateError if table is not found or if table is not of type 'TABLE'.
def _TryGetCurrentSchema(dataset_id, table_id, project_id):
    """Fetch the current BigQuery TableSchema for an existing table.

    Raises:
        SchemaUpdateError: if the table is not found or is not of type
            'TABLE' (schema modifications are only supported on tables).
    """
    client = GetApiClient()
    service = client.tables
    get_request_type = GetApiMessage('BigqueryTablesGetRequest')
    get_request = get_request_type(datasetId=dataset_id, tableId=table_id,
                                   projectId=project_id)
    try:
        table = service.Get(get_request)
        if not table or table.type != 'TABLE':
            raise SchemaUpdateError('Schema modifications only supported '
                                    'on TABLE objects received [{}]'.format(
                                        table))
    except apitools_exceptions.HttpNotFoundError:
        raise SchemaUpdateError('Table with id [{}:{}:{}] not found.'.format(
            project_id, dataset_id, table_id))
    return table.schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_table_schema(dataset_id, table_id):\n logging.info('getting table schema')\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n bg_tableref = bigquery.table.TableReference(dataset_ref, table_id)\n bg_table = bigquery_client.get_table(bg_tableref)\n r...
[ "0.73889893", "0.6379297", "0.6305416", "0.6051959", "0.58043814", "0.57696646", "0.56733495", "0.5627315", "0.5534374", "0.5378048", "0.5363961", "0.53382576", "0.5309689", "0.5298764", "0.5259146", "0.52387893", "0.52369916", "0.52243495", "0.52111673", "0.5200395", "0.5174...
0.74110204
0
Update original_schema by adding and/or relaxing mode on columns.
def _GetUpdatedSchema( original_schema, new_columns=None, relaxed_columns=None): orig_field_map = ( {f.name: f for f in original_schema.fields} if original_schema else {}) if relaxed_columns: orig_field_map = _GetRelaxedCols(relaxed_columns, orig_field_map) if new_columns: orig_field_map = _AddNewColsToSchema(new_columns.fields, orig_field_map) return sorted(orig_field_map.values(), key=lambda x: x.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _GetRelaxedCols(relaxed_columns, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for col in relaxed_columns:\n if col in orig_schema_map:\n updated_schema_map[col].mode = 'NULLABLE'\n else:\n raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)\n return updated_schema_ma...
[ "0.7231359", "0.7089279", "0.65710247", "0.6482866", "0.64562935", "0.61638325", "0.61030746", "0.5923907", "0.5808623", "0.5721755", "0.56536114", "0.56280774", "0.56276286", "0.5590315", "0.5581879", "0.55440784", "0.5543209", "0.55420613", "0.5536027", "0.55306125", "0.550...
0.6559085
3
Change mode to `NULLABLE` for columns in existing schema. Tries set mode on existing columns in orig_schema_map to `NULLABLE`. Raises SchemaUpdateError if column is not found in orig_schema_map.
def _GetRelaxedCols(relaxed_columns, orig_schema_map): updated_schema_map = orig_schema_map.copy() for col in relaxed_columns: if col in orig_schema_map: updated_schema_map[col].mode = 'NULLABLE' else: raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE) return updated_schema_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_mode(self, old_schema_entry, new_schema_entry, base_path):\n old_info = old_schema_entry['info']\n new_info = new_schema_entry['info']\n old_mode = old_info['mode']\n old_name = old_info['name']\n old_type = old_info['type']\n old_status = old_schema_entry['statu...
[ "0.581769", "0.5178056", "0.5142687", "0.5107475", "0.50148743", "0.4938137", "0.49304643", "0.4709507", "0.4704251", "0.4693166", "0.46439534", "0.460573", "0.45949084", "0.4564342", "0.45579243", "0.44939494", "0.4487079", "0.44671857", "0.44571728", "0.44475183", "0.442848...
0.637235
0
Add new columns to an existing schema. Tries add new fields to an existing schema. Raises SchemaUpdateError if column already exists in the orig_schema_map.
def _AddNewColsToSchema(new_fields, orig_schema_map): updated_schema_map = orig_schema_map.copy() for new_field in new_fields: if new_field.name in orig_schema_map: raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE) updated_schema_map[new_field.name] = new_field return updated_schema_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_column(self, schema):\n self[schema.name] = schema.copy()", "def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()", "def _GetRelaxedCols(relaxed_columns, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for col in relaxed_columns:\n...
[ "0.6689222", "0.6531303", "0.60537446", "0.6039341", "0.60350144", "0.5883875", "0.5845651", "0.5813085", "0.5690668", "0.5641946", "0.56057465", "0.54915494", "0.54909086", "0.5434344", "0.542425", "0.5340224", "0.53072643", "0.5284575", "0.52605635", "0.5229555", "0.5223570...
0.82155335
0
Validate a resource of the given type with specified ID already exists.
def _DatasetExists(dataset_id, project_id):
    """Return True if a dataset with the given id exists, else False."""
    client = GetApiClient()
    get_request_type = GetApiMessage('BigqueryDatasetsGetRequest')
    get_request = get_request_type(datasetId=dataset_id, projectId=project_id)
    try:
        client.datasets.Get(get_request)
        return True
    except apitools_exceptions.HttpNotFoundError:
        log.info('Dataset with id [{}:{}] not found.'.format(
            project_id, dataset_id))
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_id_already_exists(self) -> None:\n with pytest.raises(IntegrityError):\n ObservationType.add({'id': 1, 'name': 'New Type', 'units': 'Kilo-Frobnicate',\n 'description': 'A new filter type.'})", "def check_id(self, id):", "def testValidateId(self):\n ...
[ "0.6422405", "0.64173734", "0.6142269", "0.61312246", "0.5939884", "0.5908327", "0.5880398", "0.5806814", "0.577634", "0.5734848", "0.5730072", "0.56382006", "0.55637", "0.5501549", "0.5488068", "0.5485431", "0.5476124", "0.5466836", "0.54339", "0.5411698", "0.5398409", "0....
0.0
-1
Validate a resource of the given type with specified ID already exists.
def _TableExists(dataset_id, table_id, project_id):
    """Return True if a table with the given id exists, else False."""
    client = GetApiClient()
    get_request_type = GetApiMessage('BigqueryTablesGetRequest')
    get_request = get_request_type(datasetId=dataset_id, tableId=table_id,
                                   projectId=project_id)
    try:
        client.tables.Get(get_request)
        return True
    except apitools_exceptions.HttpNotFoundError:
        log.info('Table with id [{}:{}:{}] not found.'.format(
            project_id, dataset_id, table_id))
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_id_already_exists(self) -> None:\n with pytest.raises(IntegrityError):\n ObservationType.add({'id': 1, 'name': 'New Type', 'units': 'Kilo-Frobnicate',\n 'description': 'A new filter type.'})", "def check_id(self, id):", "def testValidateId(self):\n ...
[ "0.6422405", "0.64173734", "0.6142269", "0.61312246", "0.5939884", "0.5908327", "0.5880398", "0.5806814", "0.577634", "0.5734848", "0.5730072", "0.56382006", "0.55637", "0.5501549", "0.5488068", "0.5485431", "0.5476124", "0.5466836", "0.54339", "0.5411698", "0.5398409", "0....
0.0
-1
Try to delete a dataset, propagating error on failure.
def _TryDeleteDataset(dataset_id, project_id):
    """Delete a dataset (including contents), propagating error on failure."""
    client = GetApiClient()
    delete_request_type = GetApiMessage('BigqueryDatasetsDeleteRequest')
    delete_request = delete_request_type(datasetId=dataset_id,
                                         projectId=project_id,
                                         deleteContents=True)
    client.datasets.Delete(delete_request)
    log.info('Deleted dataset [{}:{}]'.format(project_id, dataset_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_dataset(self, dataset):\n raise NotImplementedError('delete_dataset')", "def _handle_creation_failure(session: Session, stub_dataset: Dataset, error: str):\n try:\n dataset.delete(session, stub_dataset)\n except requests.HTTPError:\n raise CreationFailure(\n f\"Cr...
[ "0.7605005", "0.7107175", "0.70523417", "0.69809973", "0.6883148", "0.6740322", "0.6667521", "0.65653944", "0.6536053", "0.65328515", "0.65034896", "0.6410687", "0.6386324", "0.6328574", "0.63253295", "0.62904114", "0.6277353", "0.62735856", "0.6252367", "0.61857146", "0.6168...
0.74737805
1
Try to delete a dataset, propagating error on failure.
def _TryDeleteTable(dataset_id, table_id, project_id):
    """Try to delete a table, propagating error on failure."""
    # NOTE: docstring previously said "dataset" (copy-paste from
    # _TryDeleteDataset); this function deletes a table.
    client = GetApiClient()
    service = client.tables
    delete_request_type = GetApiMessage('BigqueryTablesDeleteRequest')
    delete_request = delete_request_type(datasetId=dataset_id,
                                         tableId=table_id,
                                         projectId=project_id)
    service.Delete(delete_request)
    log.info('Deleted table [{}:{}:{}]'.format(project_id, dataset_id,
                                               table_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_dataset(self, dataset):\n raise NotImplementedError('delete_dataset')", "def _TryDeleteDataset(dataset_id, project_id):\n client = GetApiClient()\n service = client.datasets\n delete_request_type = GetApiMessage('BigqueryDatasetsDeleteRequest')\n delete_request = delete_request_type(dataset...
[ "0.7605005", "0.74737805", "0.7107175", "0.70523417", "0.69809973", "0.6883148", "0.6740322", "0.6667521", "0.65653944", "0.6536053", "0.65328515", "0.65034896", "0.6410687", "0.6386324", "0.6328574", "0.63253295", "0.62904114", "0.6277353", "0.62735856", "0.6252367", "0.6185...
0.52461874
96
Get Table resource args (source, destination) for copy command.
def GetTableCopyResourceArgs():
  """Get Table resource args (source, destination) for copy command."""
  table_spec_data = yaml_data.ResourceYAMLData.FromPath('bq.table')

  def _MakeSpec(verb, name):
    # Source and destination specs are identical apart from the verb text
    # and the argument name (which also overrides the 'table' attribute).
    return resource_args.GetResourcePresentationSpec(
        verb=verb,
        name=name,
        required=True,
        prefixes=True,
        attribute_overrides={'table': name},
        positional=False,
        resource_data=table_spec_data.GetData())

  arg_specs = [
      _MakeSpec('to copy from', 'source'),
      _MakeSpec('to copy to', 'destination'),
  ]
  # Let either side's dataset flag fall through to the other side.
  fallthroughs = {
      '--source.dataset': ['--destination.dataset'],
      '--destination.dataset': ['--source.dataset'],
  }
  return [concept_parsers.ConceptParser(arg_specs, fallthroughs)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ProcessTableCopyConfiguration(ref, args, request):\n del ref # Unused\n source_ref = args.CONCEPTS.source.Parse()\n destination_ref = args.CONCEPTS.destination.Parse()\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.datasetId',\n destination_ref.Parent().Name(...
[ "0.62132674", "0.56889355", "0.55710876", "0.55588037", "0.5540921", "0.54608697", "0.54608697", "0.54608697", "0.53417224", "0.5325953", "0.52964944", "0.52829", "0.5282849", "0.52788526", "0.52047265", "0.52009565", "0.51657414", "0.51652837", "0.516145", "0.5143834", "0.51...
0.8508633
0
Print a simple greeting to each user in the list.
def greet_users(names):
    """Print a personalized greeting for every user in *names*."""
    for user in names:
        print(f"Hello, {user.title()}!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def greet_users(names):\n for name in names:\n print(f\"Hello, {name.title()}!\")", "def greeting(list_of_guests):\r\n for i in list_of_guests: \r\n print('Witaj ' + i)", "def greet_users(names):\n for name in names:\n msg = f\"Hello, {name.title()}\"\n print(msg)", "...
[ "0.7734112", "0.77260417", "0.77145106", "0.76987046", "0.76987046", "0.7657923", "0.7309932", "0.7257354", "0.72450185", "0.71396", "0.7127663", "0.71238464", "0.70934796", "0.70934796", "0.7048947", "0.70248073", "0.69579583", "0.69458425", "0.6898827", "0.6775238", "0.6753...
0.7734813
1
Return a full name, neatly formatted.
def get_formatted_name(first_name, last_name, middle_name=''):
    """Return a full name, neatly formatted.

    The middle name is optional; when it is falsy (e.g. the default ''),
    only first and last names are joined.
    """
    if middle_name:
        parts = (first_name, middle_name, last_name)
    else:
        parts = (first_name, last_name)
    return ' '.join(parts).title()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_full_name(self):\n full_name = '{0} {1} {2}'.format(self.last_name, self.first_name, self.patronymic)\n return full_name.strip()", "def get_full_name(self):\n\t\tfull_name = '%s %s' % (self.first_name, self.last_name)\n\t\treturn full_name.strip()", "def get_full_name(self):\r\n fu...
[ "0.87166595", "0.86399925", "0.860218", "0.860218", "0.8595642", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "...
0.8130383
83
Return a dictionary of information about a person.
def build_person(first_name, last_name, age=''):
    """Return a dictionary of information about a person.

    Note: the age key is included only when *age* is truthy, so a falsy
    value such as 0 or the default '' is silently omitted.
    """
    return {
        'first': first_name,
        'last': last_name,
        **({'age': age} if age else {}),
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_person(self):\n person_dict = {\n 'firstname': self.__firstname,\n 'lastname': self.__lastname,\n 'height': self.__height,\n 'weight': self.__weight,\n 'age': self.__age\n }\n return person_dict", "def who_am_i():\n return {'n...
[ "0.78430605", "0.69874537", "0.68447304", "0.67983556", "0.6684294", "0.6533277", "0.64916706", "0.6416313", "0.64020026", "0.63483405", "0.63206047", "0.62963533", "0.6294688", "0.6291786", "0.6261915", "0.6250636", "0.62482584", "0.62299424", "0.62139595", "0.61749566", "0....
0.58440995
48