query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Prepare a list of all records from the payload to send to SQS
def _payload_messages(payloads): return [ message for payload in payloads for message in payload.sqs_messages ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_records(data: List[str]) -> List[dict]:\n records = []\n for d in data:\n records.append(create_record(d))\n\n logger.debug(f\"Formed Kinesis Records batch for PutRecords API: {records}\")\n return records", "def get_events_batch() -> PayloadDictList:\n ...", "def _prepare_...
[ "0.6159722", "0.59462416", "0.5789384", "0.5695923", "0.56770617", "0.5449017", "0.54378134", "0.54224366", "0.5411029", "0.53040165", "0.52972186", "0.529558", "0.52868986", "0.5283321", "0.52753913", "0.52752817", "0.5261119", "0.52518207", "0.5238154", "0.5229178", "0.5208...
0.56031376
5
Send a list of records to SQS, batching as necessary
def send(self, payloads): records = self._payload_messages(payloads) # SQS only supports up to 10 messages so do the send in batches for message_batch in self._message_batches(records): response = self._send_messages(message_batch) self._finalize(response, message_batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _message_batches(cls, records):\n # Dump the records to a list of minimal json\n records_json = [\n json.dumps(record, separators=(',', ':')) for record in records\n ]\n\n current_batch_size = 0\n current_batch = []\n for record in records_json:\n ...
[ "0.6739715", "0.66852796", "0.6401511", "0.63491935", "0.6338009", "0.63261616", "0.63134557", "0.63052964", "0.6195632", "0.6183819", "0.6173889", "0.61398053", "0.6103908", "0.6066584", "0.6057908", "0.60467565", "0.6021644", "0.6018628", "0.5985069", "0.5927403", "0.588516...
0.7458315
0
Method to follow another user that is, to create a unidirectional link from one user to the other.
def follow(self, user_index, following_index): if user_index >= self.num_users or following_index >= self.num_users: raise ValueError( f"Number of users is {self.num_users}, but indices " f"{user_index} and {following_index} were requested." ) if self.users_hat[following_index, user_index] == 0: self.users_hat[following_index, user_index] = 1 elif self.is_verbose(): self.log(f"User {following_index} was already following user {user_index}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def follow_user(cls, user, following):\r\n pass", "def follow_user(cls, user, following):\n pass", "def follow(self, follower, followee):\n pass", "def follow(self, other):\n\t\tif not self.follows(other):\n\t\t\tself.followed.append(other)", "def follow(request, usertofollow):\n to...
[ "0.7963307", "0.7816804", "0.74662054", "0.7300386", "0.7281261", "0.7074805", "0.7074805", "0.6866235", "0.68443894", "0.68353206", "0.6758741", "0.67564857", "0.67033875", "0.6687655", "0.6658397", "0.6617403", "0.6589921", "0.65871906", "0.65700394", "0.6544247", "0.653075...
0.6643213
15
Method to unfollow another user that is, to delete the unidirectional link that goes from one user to the other.
def unfollow(self, user_index, following_index): if user_index >= self.num_users or following_index >= self.num_users: raise ValueError( f"Number of users is {self.num_users}, but indices " f"{user_index} and {following_index} were requested." ) if self.users_hat[following_index, user_index] == 1: self.users_hat[following_index, user_index] = 0 elif self.is_verbose(): self.log(f"User {following_index} was not following user {user_index}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfollow(self, other):\n\t\tif self.follows(other):\n\t\t\tself.followed.remove(other)", "def unfollow(self, user):\n f = self.followed.filter_by(followed_id=user.id).first()\n if f:\n db.session.delete(f)", "def unfollow(request, usertostopfollow):\n stop_follow = Member.object...
[ "0.8147802", "0.8125675", "0.80680937", "0.80040616", "0.78747725", "0.7863218", "0.76762897", "0.7540846", "0.7521238", "0.74924225", "0.74637675", "0.7456486", "0.74547946", "0.73516536", "0.73467946", "0.7258564", "0.7226699", "0.719125", "0.7158494", "0.71420807", "0.7140...
0.7063021
25
Method to add a user as friends that is, to create a bidirectional link that connects the two users.
def add_friends(self, user1_index, user2_index): if user1_index >= self.num_users or user2_index >= self.num_users: raise ValueError( f"Number of users is {self.num_users}, but indices " f"{user1_index} and {user2_index} were requested." ) if self.users_hat[user1_index, user2_index] == 0: self.users_hat[user1_index, user2_index] = 1 elif self.is_verbose(): self.log(f"User {user2_index} was already following user {user1_index}") if self.users_hat[user2_index, user1_index] == 0: self.users_hat[user2_index, user1_index] = 1 elif self.is_verbose(): self.log(f"User {user1_index} was already following user {user2_index}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_friend(self, User):\n if not User in self.friends.all():\n self.friend.add(User)\n #self.save()", "def addfriend(self, second_user_id):\n second_user = User.objects.get(id=second_user_id)\n new_friendship = Friendship.objects.create(friend_user=self, friend=seco...
[ "0.7621462", "0.7019252", "0.69413483", "0.69305646", "0.69305646", "0.69305646", "0.6898999", "0.68216527", "0.67973375", "0.67016155", "0.6672114", "0.6542767", "0.6497178", "0.6497178", "0.6497178", "0.6497178", "0.6441999", "0.6441999", "0.6441999", "0.6441999", "0.643866...
0.7181159
1
Method to remove a user from friends that is, to remove a bidirectional link that connects the two users.
def remove_friends(self, user1_index, user2_index): if user1_index >= self.num_users or user2_index >= self.num_users: raise ValueError( f"Number of users is {self.num_users}, but indices " f"{user1_index} and {user2_index} were requested." ) if self.users_hat[user1_index, user2_index] == 1: self.users_hat[user1_index, user2_index] = 0 elif self.is_verbose(): self.log(f"User {user2_index} was not following user {user1_index}") if self.users_hat[user2_index, user1_index] == 1: self.users_hat[user2_index, user1_index] = 0 elif self.is_verbose(): self.log(f"User {user1_index} was not following user {user2_index}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfriend(self, remove):\n remover_friends_list = self # person terminating the friendship \n \n # remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n\n #remove friend from removee friend list\n friends_list = FriendList.objects.get(user=r...
[ "0.7632799", "0.7582847", "0.755684", "0.730245", "0.71368355", "0.7024519", "0.69181836", "0.676393", "0.6699931", "0.667411", "0.6658038", "0.6638588", "0.66359735", "0.6634647", "0.6563862", "0.6552684", "0.65465987", "0.65465987", "0.65465987", "0.6525971", "0.65162206", ...
0.6983191
6
Connect to a specific port
def connect(): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.connect((src_addr, src_port)) except: print("Error connecting to {}:{}".format(src_addr, src_port)) return None try: print("Sending stream info") sock.sendall(struct.pack('<iBi', 5, 1, stream_id)); except: print("Error: Stream rejected") return None print("Successfully connected to host") return sock
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self, host, port):\n pass", "def connect(self, port=None, options=None):\n pass", "def connect(self,ip,port):\n return self.network.connect(ip,port)", "def connect(self, host=None, port=None):\n host = self.host if host is None else host\n port = self.port if po...
[ "0.81374085", "0.7722904", "0.7718337", "0.7456098", "0.7439767", "0.7409976", "0.740931", "0.7345239", "0.7266387", "0.7218907", "0.71471167", "0.71010596", "0.7090724", "0.7063016", "0.7051909", "0.70335734", "0.70265913", "0.70235753", "0.7016226", "0.700159", "0.69971234"...
0.0
-1
Experimental function to read each stream frame from the server
def recv_depth_frame(sock): (frame_size,) = struct.unpack("<i", recv_all(sock, 4)) return recv_all(sock, frame_size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stream_frames(video_capture):", "def get_frame(self):\n\n self.load_network_stream()\n\n while True:\n try:\n if self.online:\n # Read next frame from stream and insert into deque\n status, frame = self.cap.read()\n ...
[ "0.7428881", "0.68029535", "0.67922986", "0.6777833", "0.6765679", "0.6748719", "0.67172873", "0.6695475", "0.6688924", "0.6657313", "0.6572944", "0.64670205", "0.64453244", "0.6411161", "0.63258195", "0.6290956", "0.62526816", "0.62319446", "0.6203036", "0.6193663", "0.61668...
0.0
-1
Render the Lilypond music expression lily using lilypond.
def render_lily(self, lily): shasum = "%s.png" % sha(lily.encode('utf-8')).hexdigest() relfn = posixpath.join(self.builder.imgpath, 'lily', shasum) outfn = path.join(self.builder.outdir, '_images', 'lily', shasum) if path.isfile(outfn): return relfn if hasattr(self.builder, '_lilypng_warned'): return None, None music = DOC_HEAD + self.builder.config.pnglily_preamble + lily if isinstance(music, unicode): music = music.encode('utf-8') # use only one tempdir per build -- the use of a directory is cleaner # than using temporary files, since we can clean up everything at once # just removing the whole directory (see cleanup_tempdir_lily) if not hasattr(self.builder, '_lilypng_tempdir'): tempdir = self.builder._lilypng_tempdir = tempfile.mkdtemp() else: tempdir = self.builder._lilypng_tempdir tf = open(path.join(tempdir, 'music.ly'), 'w') tf.write(music) tf.close() ensuredir(path.dirname(outfn)) # use some standard lilypond arguments lilypond_args = [self.builder.config.pnglily_lilypond] #lilypond_args += ['-o', tempdir, '--png'] lilypond_args += ['-dbackend=eps', '-dno-gs-load-fonts', '-dinclude-eps-fonts', '-o', tempdir, '--png'] # add custom ones from config value lilypond_args.extend(self.builder.config.pnglily_lilypond_args) # last, the input file name lilypond_args.append(path.join(tempdir, 'music.ly')) try: p = Popen(lilypond_args, stdout=PIPE, stderr=PIPE) except OSError, err: if err.errno != 2: # No such file or directory raise self.builder.warn('lilypond command %r cannot be run (needed for music ' 'display), check the pnglily_lilypond setting' % self.builder.config.pnglily_lilypond) self.builder._lilypng_warned = True return None, None stdout, stderr = p.communicate() if p.returncode != 0: raise LilyExtError(u'lilypond exited with error:\n[stderr]\n%s\n' '[stdout]\n%s' % (stderr.decode('utf-8'), stdout.decode('utf-8'))) shutil.copyfile(path.join(tempdir, 'music.png'), outfn) #Popen(['mogrify', '-trim', outfn], stdout=PIPE, stderr=PIPE) return relfn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self, format=\"png\"):\n from .core.transforms import lilypond\n seq = HSeq(self) | lilypond()\n\n lily_output = write_lilypond.lily_format(seq)\n if not lily_output.strip():\n #In the case of empty lily outputs, return self to get a textual display\n r...
[ "0.62011886", "0.60662764", "0.5725134", "0.5563053", "0.5401194", "0.52068543", "0.51799726", "0.51762205", "0.5175516", "0.5059088", "0.4895712", "0.4883887", "0.48677018", "0.4829131", "0.47872004", "0.4744463", "0.47235727", "0.4710764", "0.47090602", "0.4685067", "0.4676...
0.7580346
0
This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.
def plot_confusion_matrix(self, y_test, y_pred, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): target_names = ['Thông thường', 'Đầu cơ'] cm = confusion_matrix(y_test, y_pred) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(target_names)) plt.xticks(tick_marks, target_names, rotation=0) plt.yticks(tick_marks, target_names) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # print("Normalized confusion matrix") else: 1 # print('Confusion matrix, without normalization') # print(cm) thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_confusion_matrix(cm, classes=[0,1], normalize=False, title='Confusion matrix', print_matrix=False):\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n...
[ "0.8194862", "0.80949175", "0.8029915", "0.8019153", "0.79941195", "0.7991258", "0.7980955", "0.7976606", "0.79610753", "0.79590565", "0.79378676", "0.7934962", "0.7934504", "0.79313844", "0.7926313", "0.7924577", "0.79241234", "0.7923211", "0.7923023", "0.7921931", "0.791787...
0.0
-1
Get an AWS credential.
def get_aws_secret(role): global AWS_ID AWS_ID += 1 request.data return jsonify({ "request_id": f"a-request-id-{AWS_ID}", "lease_id": f"aws/creds/{role}/a-lease-id-{AWS_ID}", "renewable": True, "lease_duration": 3600, "data": { "access_key": "ASDF1234", "secret_key": "xljadslklk3mlkmlkmxklmx09j3990j", "security_token": None }, "wrap_info": None, "warnings": None, "auth": None })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aws_credentials(self) -> dict:\n response = self._session().get(self._cloud_access_url())\n if not response.ok:\n response.raise_for_status()\n cloud_access = response.json()\n creds = {\n 'aws_access_key_id': cloud_access['AccessKeyId'],\n 'aws_secr...
[ "0.696194", "0.6921474", "0.6762357", "0.6643356", "0.65502733", "0.650131", "0.6463692", "0.6458278", "0.64089775", "0.6393392", "0.6385138", "0.6384381", "0.63843215", "0.63843215", "0.63843215", "0.63800085", "0.636566", "0.63641655", "0.635698", "0.635698", "0.63561594", ...
0.6363484
18
Look up an auth token.
def look_up_a_token(): try: data = request.get_json(force=True) except Exception: data = None if data: tok = data['token'] else: tok = request.headers.get('TOK_ID') request.data try: creation_time = int(round(datetime.timestamp(tokens[tok]), 0)) issue_time = tokens[tok].isoformat() except Exception: _now = datetime.now(UTC) creation_time = int(round(datetime.timestamp(_now))) issue_time = _now.isoformat() tokens[tok] = _now expire_time = datetime.fromtimestamp(creation_time + 2764790) return jsonify({ "data": { "accessor": "8609694a-cdbc-db9b-d345-e782dbb562ed", "creation_time": creation_time, "creation_ttl": 2764800, "display_name": "fooname", "entity_id": "7d2e3179-f69b-450c-7179-ac8ee8bd8ca9", "expire_time": expire_time.isoformat(), "explicit_max_ttl": 0, "id": tok, "identity_policies": [ "dev-group-policy" ], "issue_time": issue_time, "meta": { "username": "tesla" }, "num_uses": 0, "orphan": True, "path": "auth/kubernetes/login", "policies": [ "default" ], "renewable": True, "ttl": 2764790 } })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_auth_token(self):\n\n __logger__.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']", "def _lookup_token(self):\n path = '/authn/{account}/{login}/authenticate'.format(\n account=self.account, login='admin'\n )\n res = self._...
[ "0.746558", "0.74365014", "0.7382705", "0.721436", "0.7206889", "0.7119792", "0.7098907", "0.69588864", "0.6947529", "0.690765", "0.6845077", "0.68132395", "0.68104905", "0.67953134", "0.67916936", "0.6760361", "0.6760361", "0.6753948", "0.67510355", "0.67459446", "0.67286247...
0.6357753
74
Attach the Market Cap CustomFactor to the Pipeline returns Pipeline (numpy.array) An array containing all data needed for the algorithm
def make_pipeline(): mkt_cap_screen = (morningstar.valuation.market_cap.latest > 1e9) return Pipeline( columns={ 'Free Cash Flow': morningstar.cash_flow_statement.free_cash_flow.latest, }, screen=mkt_cap_screen)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fill_pipeline():\n\n # m1_pca = PCA()\n m1_pca = PCA(svd_solver='randomized', whiten=True) # 与官网里子一致的后2个参数,否则分数很差\n # m1_pca.fit(X_train)\n\n m2_svc = SVC(kernel='rbf', class_weight='balanced')\n\n pipe = Pipeline(steps=[('pca', m1_pca),\n ('svc', m2_svc)])\n print(...
[ "0.6081324", "0.59062445", "0.5773329", "0.56232554", "0.5604073", "0.56040514", "0.56028223", "0.5570722", "0.5512372", "0.54467845", "0.544168", "0.54007536", "0.5379913", "0.5364416", "0.53506017", "0.5311731", "0.5311245", "0.52821445", "0.5277087", "0.5264683", "0.520849...
0.55851984
7
Called every day before market open.
def before_trading_start(context, data): context.output = pipeline_output('pipeline') # sort by earning yield context.output = context.output.sort( columns='Free Cash Flow', ascending=False) # get top 20 stocks as security list context.eligible_assets = context.output.iloc[:19]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_market_info(self):\n pass", "def isMarketOpen(self):\n if not self.normalDay:\n return False\n now = datetime.now()\n if now.hour >= 9 and now.hour < 16:\n if now.hour == 9 and now.minute < 30:\n return False\n return True\n ...
[ "0.6588593", "0.65427184", "0.6336546", "0.62522703", "0.61856025", "0.61591846", "0.6053704", "0.60283375", "0.60003495", "0.5933112", "0.5917485", "0.590598", "0.5869488", "0.5780416", "0.57541144", "0.57364595", "0.5636754", "0.561605", "0.5577664", "0.55399215", "0.551966...
0.496805
99
This function places an order for "context.index" in the amount required to neutralize the beta exposure of the portfolio. Note that additional leverage in the account is taken on, however, net market exposure is reduced.
def hedge_portfolio(context, data): factors = get_alphas_and_betas(context, data) beta_exposure = 0.0 count = 0 for asset in context.portfolio.positions: if asset in factors and asset != context.index: if not np.isnan(factors[asset].beta): beta_exposure += factors[asset].beta count += 1 beta_hedge = -1.0 * beta_exposure / count dollar_amount = context.portfolio.portfolio_value * beta_hedge record(beta_hedge=beta_hedge) if not np.isnan(dollar_amount): order_target_value(context.index, dollar_amount)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_alphas_and_betas(context, data):\r\n all_assets = context.portfolio.positions.keys()\r\n if context.index not in all_assets:\r\n all_assets.append(context.index)\r\n prices = data.history(all_assets, 'price', context.lookback, '1d')\r\n returns = prices.pct_change()[1:]\r\n # index_re...
[ "0.5514604", "0.5256099", "0.5154788", "0.50738245", "0.50134844", "0.5007411", "0.4994111", "0.49798325", "0.4962537", "0.4952547", "0.4933376", "0.49214765", "0.49075228", "0.49000627", "0.4870517", "0.48602587", "0.4830687", "0.48236924", "0.47850198", "0.47702926", "0.474...
0.55305976
0
returns a dataframe of 'alpha' and 'beta' exposures for each asset in the current universe.
def get_alphas_and_betas(context, data): all_assets = context.portfolio.positions.keys() if context.index not in all_assets: all_assets.append(context.index) prices = data.history(all_assets, 'price', context.lookback, '1d') returns = prices.pct_change()[1:] # index_returns = returns[context.index] factors = {} for asset in context.portfolio.positions: try: y = returns[asset] factors[asset] = linreg(returns[context.index], y) except: log.warn("[Failed Beta Calculation] asset = %s" % asset.symbol) return pd.DataFrame(factors, index=['alpha', 'beta'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_alphas(portfolio_returns,risk_free,market_returns,betas):\r\n \r\n R = portfolio_returns\r\n Rf = risk_free\r\n Beta = betas\r\n Rm = market_returns\r\n alpha = R - Rf - (Beta*(Rm-Rf))\r\n \r\n return alpha", "def transparency(\n et: pd.DataFrame, alpha_by: Hashable, alpha_boun...
[ "0.571466", "0.5539759", "0.5527638", "0.5383794", "0.53507555", "0.5238355", "0.51728773", "0.5134495", "0.5117016", "0.50879073", "0.50690675", "0.5064041", "0.5010054", "0.49698728", "0.49637634", "0.49520984", "0.49412426", "0.4937754", "0.4913756", "0.49073732", "0.48986...
0.7166975
0
Removes charracters listed in self.custom_chars
def _remove_custom_chars(self, text: str) -> str: patterns = "|".join([x for x in self.custom_chars]) return re.sub(patterns, "", str(text), flags=re.IGNORECASE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \", text)\n return text", "def strip_other_charcter...
[ "0.7372764", "0.7265707", "0.7070031", "0.6986273", "0.69196767", "0.68504214", "0.67211777", "0.66895443", "0.6674223", "0.6622369", "0.662198", "0.66078997", "0.65977836", "0.6588692", "0.65562934", "0.65343094", "0.6528675", "0.6523658", "0.6516981", "0.650481", "0.6504477...
0.8835553
0
Removes strings starting with http
def _remove_urls(self, text: str) -> str: pattern = r"http\S+" return re.sub(pattern, " ", str(text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_URL(sample):\n return re.sub(r\"http\\S+\", \"\", sample)", "def remove_urls(self, text):\n return re.sub(r'http.?://[^\\s]+[\\s]?', '', text)", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def remove_url(text):\n...
[ "0.78006214", "0.7698079", "0.7578181", "0.74963003", "0.7458265", "0.74112725", "0.73307616", "0.72641194", "0.72031736", "0.7188069", "0.71281874", "0.70712876", "0.7058085", "0.7049562", "0.7030817", "0.69787", "0.6963308", "0.69407505", "0.6912276", "0.68773633", "0.68092...
0.780052
1
Removes html tags and other related elements
def _remove_html_tags(self, text: str) -> str: pattern = r""" (?x) # Turn on free-spacing <[^>]+> # Remove <html> tags | &([a-z0-9]+|\#[0-9]{1,6}|\#x[0-9a-f]{1,6}); # Remove &nbsp; """ return re.sub(pattern, " ", str(text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_html_tags_fun(self):\n cleaner = re.compile('<.*?>')\n cleaned_text = re.sub(cleaner, '', self.doc)\n cleaned_text = re.sub('[\\n\\t]', '', cleaned_text)\n self.doc = cleaned_text", "def remove_html_tags(self,text):\n #https://medium.com/@jorlugaqui/how-to-strip-html...
[ "0.84983486", "0.80671585", "0.7806236", "0.7542184", "0.7538982", "0.75369763", "0.75203156", "0.74862987", "0.74467105", "0.74467105", "0.7445784", "0.7424373", "0.73804176", "0.7364171", "0.7357503", "0.7353617", "0.7350502", "0.7333802", "0.7327136", "0.73163265", "0.7316...
0.74180526
12
Replaces accents with plain alphabets
def _remove_diacritics(self, text: str) -> str: nfkd_form = unicodedata.normalize("NFKD", text) return "".join([char for char in nfkd_form if not unicodedata.combining(char)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _replace_accented(text: str) -> str:\n return unidecode.unidecode(text)", "def remove_accented_chars(text):\n text = unidecode.unidecode(text)\n return text", "def replace_accented(input_str):\n nkfd_form = unicodedata.normalize('NFKD', input_str)\n return u\"\".join([c for c in nkfd_form if...
[ "0.7602486", "0.7276023", "0.7271902", "0.7019935", "0.6963169", "0.6874722", "0.682965", "0.67585826", "0.66468215", "0.6607765", "0.6562943", "0.6530856", "0.64882517", "0.6410148", "0.640093", "0.63787353", "0.628986", "0.6249889", "0.6244368", "0.6241801", "0.61995006", ...
0.6086533
31
Removes any occurence of digits from the text
def _remove_digits(self, text: str) -> str: return re.sub(r"\d+", " ", str(text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_digits(self, text):\n return re.sub('\\d+', '', text)", "def remove_numbers(text):\n return re.sub(r'\\d+', '',text)", "def remove_numbers(text):\n result = re.sub(r'\\d+', '', text)\n return result", "def remove_digits(text):\n return re.sub(r'[\\d]', '', text)", "def remove_...
[ "0.8916652", "0.8736428", "0.8688471", "0.8684917", "0.8524612", "0.8524612", "0.8524612", "0.8419046", "0.82202196", "0.82007706", "0.8171787", "0.8140685", "0.8085484", "0.80677295", "0.7969214", "0.79090506", "0.7773248", "0.7762869", "0.76908314", "0.7537781", "0.7432999"...
0.8508615
7
Removes isolated block of digits
def _remove_digit_blocks(self, text: str) -> str: return re.sub(r"\b\d+\b", " ", str(text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_free_digits(text):\n return RegexFilters.replace_free_digits(text, \" \")", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.tran...
[ "0.7002717", "0.6815302", "0.6815302", "0.6815302", "0.6682333", "0.6597962", "0.6564372", "0.6525351", "0.6459419", "0.6430528", "0.6379692", "0.63564974", "0.6303089", "0.6271742", "0.6265842", "0.6203773", "0.6179784", "0.6123884", "0.6112976", "0.610751", "0.6047446", "...
0.7632309
0
Removes special characters as defined by the pattern in self.special_chars_pattern
def _remove_special_chars(self, text: str) -> str: pattern = re.compile(self.special_chars_pattern) text = re.sub(pattern, " ", text) return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def remove_special_characters(text, remove_digits=False):\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n text = re.sub(pattern, '', text)\n return text", "def remove...
[ "0.8309218", "0.7856203", "0.7768344", "0.76876336", "0.7650726", "0.76494753", "0.76118165", "0.7510932", "0.74934494", "0.7467364", "0.74673146", "0.72453177", "0.72366244", "0.71997553", "0.7166965", "0.71665037", "0.7158556", "0.7108985", "0.70958817", "0.6979312", "0.688...
0.85286283
0
Removes special charaters with whitespace on left
def _remove_left_padded_special_chars(self, text: str) -> str: pattern = re.compile("\ +[^A-Za-z0-9\n]") text = re.sub(pattern, " ", text) return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \", text)\n return text", "def remove_special_chars...
[ "0.8474704", "0.8094379", "0.80897444", "0.8077067", "0.79241997", "0.7823599", "0.77800685", "0.77404356", "0.7715935", "0.76775664", "0.7647973", "0.7613351", "0.7509526", "0.74965906", "0.7397818", "0.73908854", "0.73888534", "0.73783976", "0.733939", "0.73306227", "0.7296...
0.76449716
11
Removes stopwords as defined by self.stop_words
def _remove_stopwords(self, text: str) -> str: pattern = r""" (?x) # Set flag to allow verbose regexps \w+(?:-\w+)* # Words with optional internal hyphens | \s* # Any space | [][!"#$%&'*+,-./:;<=>?@\\^():_`{|}~] # Any symbol """ symbol = " " return "".join( t if t not in self.stop_words else symbol for t in re.findall(pattern, text) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_stopwords_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = [token for token in tokens\n if token.lower() not in self.stopword_list]\n self.doc = ' '.join(cleaned_tokens)", "def rm_stop_words(self, words):\n return [word for word in wor...
[ "0.85951626", "0.8196474", "0.8164273", "0.813838", "0.81174606", "0.80364287", "0.80274475", "0.7989631", "0.7988493", "0.7958696", "0.79517734", "0.7930331", "0.7930331", "0.7930331", "0.7930331", "0.7913143", "0.7845168", "0.7843913", "0.7841652", "0.7822895", "0.7813601",...
0.7446543
40
Removes tabs, newlines and any kind of space characters
def _remove_whitespaces(self, text: str) -> str: return " ".join(re.sub("\xa0", " ", str(text)).split())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)", "def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n re...
[ "0.8449928", "0.7658336", "0.73809946", "0.7345701", "0.7265191", "0.726251", "0.7227112", "0.7216958", "0.7216958", "0.7206436", "0.72063667", "0.7186538", "0.71344566", "0.7107262", "0.70810986", "0.7080624", "0.7078933", "0.6926076", "0.6899674", "0.68935776", "0.6882423",...
0.6948386
17
Reduces multiple whitespaces to single whitespace
def _remove_extra_whitespaces(self, text: str) -> str: return re.sub(" +", " ", text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeMultipleSpaces(self) -> None:\n self.text = re.sub('\\s+', ' ', self.text)", "def condense_whitespace(css):\n log.debug(\"Condensing all unnecessary white spaces.\")\n return re.sub(r\"\\s+\", \" \", css)", "def normalize_whitespace(text):\n return NORMALIZE_WHITESPACE_REGEX.sub(' ', ...
[ "0.80924445", "0.7733814", "0.77270997", "0.7487072", "0.7446078", "0.73546934", "0.7331565", "0.73040843", "0.72991526", "0.72107947", "0.7194275", "0.7159008", "0.7136506", "0.7133596", "0.71249396", "0.70942706", "0.70841247", "0.70305616", "0.70219487", "0.69447887", "0.6...
0.68979657
22
This appears to be totally unused, and leftover from testing. It may, in fact, be better/more efficient than the used walk_tree() method.
def work_tree(obj, **kwargs): max_depth = 0 exclusions = kwargs.get('exclusions', {"groups": [], "classes": [], "params": []}) groups_done = {} classes = {"depths": {}, "content": {}} params = {"depths": {}, "content": {}} if hasattr(obj, 'hostname') and not hasattr(obj, 'name'): obj.name = obj.hostname to_index = [(obj, 1)] while to_index: (obj, depth) = to_index.pop() if obj.name in groups_done and groups_done[obj.name] <= depth: continue objclasses = obj.classes.exclude(classname__in=exclusions['classes']) updated_classes = update_values(objclasses, "classname", "classparams", depth=depth, results=classes) objparams = obj.parameters.exclude(paramkey__in=exclusions['params']) updated_params = update_values(objparams, "paramkey", "paramvalue", depth=depth, results=params) if not updated_classes or not updated_params: return ("Fail", "Fail") groups_done[obj.name] = depth depth += 1 for group in obj.groups.exclude(name__in=exclusions['groups']): to_index.append((group, depth)) if max_depth < depth: max_depth = depth params["content"]['max_depth'] = max_depth params["content"]['done_count'] = len(groups_done) return (classes["content"], params["content"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traverse_tree(file, tree):\n\n\tfor node in tree.get_children():\n\t\tpass", "def __walk_tree(self):\n for root, dirnames, files in os.walk(self.path, topdown=True):\n self.dirCount += 1\n # Create a tuple with the file size, the file name and the files inode (for tracking hard links).\n fi...
[ "0.6518105", "0.6321883", "0.6208348", "0.6140773", "0.6119242", "0.60756904", "0.5974666", "0.5938595", "0.5925983", "0.5857213", "0.58487475", "0.5806037", "0.5800146", "0.5788099", "0.5784158", "0.57694286", "0.57677877", "0.57413566", "0.5722563", "0.5715842", "0.56998444...
0.0
-1
Return data (tuple of classes, params) for a given host.
def get_host_data(hostname, gettype='walk'): filteredNodes = Node.objects.filter(hostname=hostname) if (filteredNodes.count() == 1): node = filteredNodes[0] exclusions = get_exclusions(node) if gettype == 'work': (classes, params) = work_tree(node, exclusions=exclusions) return (classes, params) elif gettype == 'optwork': (classes, params) = optimized_work_tree(node, exclusions=exclusions) return (classes, params) elif gettype == 'classwork': (classes, params) = work_tree2(node, exclusions=exclusions) return (classes, params) elif gettype == 'walk': (classes, params) = walk_tree(node, exclusions=exclusions) return (classes, params) else: return ({}, {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_host_data(self):\n\n raise NotImplementedError", "def get_host_variables(self, host):\n vars = {}\n for i in self.parsers:\n vars.update(i.get_host_variables(host))\n return vars", "def loadAllHostinfo():\n hidata={}\n str=\"\"\n keytypes=loadHostinfoKeys...
[ "0.6268357", "0.5708121", "0.5704203", "0.5604825", "0.5533057", "0.5413477", "0.54038036", "0.53823394", "0.52971464", "0.5290275", "0.5289944", "0.52613753", "0.52593875", "0.5238726", "0.52148", "0.5184951", "0.5168947", "0.51548314", "0.51526666", "0.5109809", "0.5093862"...
0.652414
0
Adds a node entry definition if there is no lower depth definition. Raises RuntimeError if the depth matches.
def add_entry(self, key, value, depth): current = self.entries.get(key, None) if current is None or current.depth > depth: self.entries[key] = NodeEntry(key, value, depth) elif current.depth == depth: raise RuntimeError('Collision [depth=%d] for entry [type=%s]: %s' % (depth, self.nodetype, key))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_entry(self, entry): # Hashmap.add_entry\n\n if entry.hexdigest in self.contentHash:\n self.contentHash[entry.hexdigest].append(entry)\n else:\n self.contentHash[entry.hexdigest] = [ entry ]\n\n if entry.depth < self.minDepth:\n self.minD...
[ "0.55656815", "0.5325866", "0.5322028", "0.52112687", "0.5193852", "0.5172305", "0.5172305", "0.51599175", "0.5064102", "0.49744448", "0.4960249", "0.49437156", "0.49024606", "0.48839802", "0.48804682", "0.4869079", "0.48573893", "0.48446208", "0.48397067", "0.48307618", "0.4...
0.6691731
0
Adds all the entries in objs at the current depth.
def add_entries(self, objs, keyname, valuename, depth): add_entry = self.add_entry for obj in objs: key = getattr(obj, keyname, None) if key is None: continue value = getattr(obj, valuename, None) add_entry(key, value, depth)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addAll(self, objs):\n self.getSession().add_all(objs)\n self.commit() # paranoially\n return objs", "def add(self, fetchables, depth=1):\n if fetchables:\n if isinstance(fetchables, collections.Sequence):\n for fetchable in fetchables:\n ...
[ "0.61764467", "0.60474694", "0.57324225", "0.56951404", "0.55937594", "0.55879956", "0.558788", "0.54297394", "0.5391662", "0.53283435", "0.53044546", "0.52996117", "0.5273503", "0.5259931", "0.52462256", "0.5240757", "0.52057797", "0.51749694", "0.5160668", "0.5155546", "0.5...
0.76109475
0
Returns the entries as a key => value dict.
def as_dict(self): return dict((key, value) for key, value, depth in self.entries.itervalues())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n d = {}\n i = 0\n for entry in self.entries:\n d[i] = {}\n attributes = self.get_attribute_list()\n print (attributes)\n for data in attributes:\n d[i][data] = entry.__getattribute__(data)\n i = i + 1\n ...
[ "0.7613547", "0.7377054", "0.67987955", "0.63767034", "0.6352516", "0.6342387", "0.63202107", "0.6266719", "0.62579256", "0.6247313", "0.623333", "0.6207049", "0.62052894", "0.61866677", "0.61613494", "0.61424756", "0.613092", "0.61234504", "0.6120027", "0.6097869", "0.609334...
0.73771703
1
This appears to be totally unused, and leftover from testing. It may, in fact, be better/more efficient than the used walk_tree() method.
def work_tree2(obj, **kwargs): if 'exclusions' in kwargs: exclusions = kwargs['exclusions'] else: exclusions = Exclusions([], [], []) #groups_done = {} classes = NodeResults(nodetype='classes') params = NodeResults(nodetype='params') if hasattr(obj, 'hostname') and not hasattr(obj, 'name'): obj.name = obj.hostname to_index = [(obj, 1)] # loop opts index_pop = to_index.pop index_extend = to_index.extend egroups, eclasses, eparams = exclusions add_classes = classes.add_entries add_params = params.add_entries while to_index: (obj, depth) = index_pop() #objname = obj.name #if objname in groups_done and groups_done[objname] <= depth: #continue try: objclasses = obj.classes.exclude(classname__in=eclasses) add_classes(objclasses, "classname", "classparams", depth) objparams = obj.parameters.exclude(paramkey__in=eparams) add_params(objparams, "paramkey", "paramvalue", depth) except RuntimeError, e: return ("Fail", "Fail") # or just let it bubble up to the caller #groups_done[objname] = depth depth += 1 children = [(group, depth) for group in obj.groups.exclude(name__in=egroups)] index_extend(children) return classes.as_dict(), params.as_dict() # or (classes.entries, params.entries)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traverse_tree(file, tree):\n\n\tfor node in tree.get_children():\n\t\tpass", "def __walk_tree(self):\n for root, dirnames, files in os.walk(self.path, topdown=True):\n self.dirCount += 1\n # Create a tuple with the file size, the file name and the files inode (for tracking hard links).\n fi...
[ "0.65168923", "0.63223314", "0.6207538", "0.61407065", "0.611664", "0.60764337", "0.59763837", "0.5936731", "0.5925743", "0.58564585", "0.5848043", "0.5805785", "0.5799455", "0.57884616", "0.57827556", "0.5768611", "0.57664794", "0.5741668", "0.57231015", "0.5716858", "0.5700...
0.0
-1
This appears to be totally unused, and leftover from testing. It may, in fact, be better/more efficient than the used walk_tree() method.
def optimized_work_tree(obj, **kwargs): exclusions = kwargs.get('exclusions', {"groups": [], "classes": [], "params": []}) groups_done = {} classes = {"depths": {}, "content": {}} params = {"depths": {}, "content": {}} if hasattr(obj, 'hostname') and not hasattr(obj, 'name'): obj.name = obj.hostname to_index = [(obj, 1)] index_pop = to_index.pop index_extend = to_index.extend while to_index: (obj, depth) = index_pop() objname = obj.name if objname in groups_done and groups_done[objname] <= depth: continue objclasses = obj.classes.exclude(classname__in=exclusions['classes']) updated_classes = optimized_update_values(objclasses, "classname", "classparams", depth=depth, results=classes) objparams = obj.parameters.exclude(paramkey__in=exclusions['params']) updated_params = optimized_update_values(objparams, "paramkey", "paramvalue", depth=depth, results=params) if not updated_classes or not updated_params: return ("Fail", "Fail") groups_done[objname] = depth depth += 1 children = ((group, depth) for group in obj.groups.exclude(name__in=exclusions['groups'])) index_extend(children) params['content']['done_count'] = len(groups_done) return (classes["content"], params["content"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traverse_tree(file, tree):\n\n\tfor node in tree.get_children():\n\t\tpass", "def __walk_tree(self):\n for root, dirnames, files in os.walk(self.path, topdown=True):\n self.dirCount += 1\n # Create a tuple with the file size, the file name and the files inode (for tracking hard links).\n fi...
[ "0.6517461", "0.63216114", "0.620697", "0.61414737", "0.6116617", "0.60739696", "0.5977533", "0.5937574", "0.5928211", "0.58566964", "0.5848424", "0.58056533", "0.579866", "0.578824", "0.5784192", "0.5767787", "0.5766229", "0.57428783", "0.5723398", "0.5716726", "0.5701137", ...
0.0
-1
Create a WSGI application factory.
def create_wsgi_factory(mounts_factories): def create_wsgi(app, **kwargs): mounts = { mount: factory(**kwargs) for mount, factory in mounts_factories.items() } return DispatcherMiddleware(app.wsgi_app, mounts) return create_wsgi
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_app():\n app = Flask(__name__)\n app.config.from_object('app.configs.config')\n app.config.from_object('app.configs.settings')\n return app", "def create_app():\n from server.web import create_app\n # If we do a static javascript app via flask, add it here\n # from server.web impo...
[ "0.7593015", "0.75498307", "0.75089604", "0.7489719", "0.7488525", "0.74390584", "0.7435584", "0.7419176", "0.74065423", "0.7343458", "0.7322757", "0.730045", "0.72561944", "0.7253028", "0.72448945", "0.72247684", "0.719974", "0.7180518", "0.71766984", "0.7159296", "0.7157641...
0.6932318
48
Fix Flask environment according to ``XForwarded_`` headers.
def wsgi_proxyfix(factory=None): def create_wsgi(app, **kwargs): wsgi_app = factory(app, **kwargs) if factory else app.wsgi_app num_proxies = app.config.get("WSGI_PROXIES") proxy_config = app.config.get("PROXYFIX_CONFIG") if proxy_config and not WERKZEUG_GTE_014: return ProxyFix(wsgi_app, **proxy_config) elif num_proxies: warnings.warn( "The WSGI_PROXIES configuration is deprecated and " "it will be removed, use PROXYFIX_CONFIG instead", PendingDeprecationWarning, ) if WERKZEUG_GTE_014: return ProxyFix(wsgi_app, num_proxies=num_proxies) else: return ProxyFix(wsgi_app, x_for=num_proxies) return wsgi_app return create_wsgi
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixProxiedRequest( self, REQUEST ):\n # mod_proxy: X-Forwarded-Server\n # mod_accel: X-Host, X-Real-IP, X-URI, X-Method\n server = REQUEST.get('SERVER_URL')\n real_host = REQUEST.get('HTTP_X_FORWARDED_SERVER') or REQUEST.get('HTTP_X_HOST')\n real_addr = REQUEST.get('HTTP_X_RE...
[ "0.57845724", "0.5618568", "0.53275186", "0.5303353", "0.529868", "0.5294957", "0.52365404", "0.5180683", "0.5151824", "0.51268667", "0.5117508", "0.5112825", "0.5058677", "0.5011258", "0.5004951", "0.5002801", "0.49961042", "0.49657816", "0.49571234", "0.4935363", "0.4928755...
0.5541965
2
Create project parser method.
def pa_cmd(args, cmd): usage = "%s <options>" % command.USAGE.format(cmd) desc = command.DESCS[cmd] parser = argparse.ArgumentParser(usage=usage, description=desc) required = parser.add_argument_group('required named arguments') required.add_argument('-d', '--desc', help='The project description', required=True, type=str, dest="desc") required.add_argument('-o', '--owner', help='The project owner', required=True, type=str, dest="owner") parser.add_argument("-c", "--plmnid", dest="plmnid", default=None, help="The network PLMNID; default=None", type=str) parser.add_argument("-s", "--ssid", dest="ssid", default=None, help="The network SSID; default=None", type=SSID) parser.add_argument("-t", "--ssid_type", dest="ssid_type", default="unique", choices=["unique", "shared"], help="The network SSID type; default=unique") (args, leftovers) = parser.parse_known_args(args) return args, leftovers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_parser(self):\n\n p = argparse.ArgumentParser(\n self.TITLE,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n p.add_argument(\n \"--name\",\n metavar=\"NAME\",\n help=\"Public name of the project, for docs etc.\",\n...
[ "0.7340575", "0.7248573", "0.71354264", "0.6643591", "0.6587333", "0.6457381", "0.6324095", "0.6283771", "0.6264125", "0.62213314", "0.62162375", "0.62034565", "0.62025255", "0.61537164", "0.61472064", "0.613323", "0.60847723", "0.6076085", "0.607107", "0.60409564", "0.602598...
0.0
-1
Add a new Project
def do_cmd(gargs, args, _): request = { "desc": args.desc, "owner": args.owner } if args.ssid: request["wifi_props"] = { "bssid_type": args.ssid_type, "ssid": args.ssid } if args.plmnid: plmnid = PLMNID(args.plmnid) request["lte_props"] = { "plmnid": plmnid.to_str() } headers = command.get_headers(gargs) url = '/api/v1/projects' response, _ = command.connect(gargs, ('POST', url), 201, request, headers=headers) location = response.headers['Location'] tokens = location.split("/") project_id = tokens[-1] print(project_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_project(project):\n print('add_project: ' + str(project))\n try_insert_or_update(models.projects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n name=project['name'], path=project['name'], active=True, user_id=current_user.id)])\n ...
[ "0.8426382", "0.772563", "0.75920844", "0.7583703", "0.7538995", "0.7501233", "0.7473201", "0.7460157", "0.74459225", "0.7423872", "0.73809856", "0.7358321", "0.73564327", "0.7351144", "0.7339243", "0.7321466", "0.7311935", "0.72474617", "0.7245785", "0.72370243", "0.72247237...
0.0
-1
Renders the contact page.
def data(): return render_template( 'data.html', title='data', year=datetime.now().year, message='my data page.' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contact():\n return render_template('contact.html')", "def contact():\n\n\treturn render_template('contact.html', title='Contact',\n\t\t\t\t\t\t year=datetime.now().year,\n\t\t\t\t\t\t message='Your contact page.')", "def contact():\n return render_template(\n 'contact.html',\n nav=...
[ "0.8343477", "0.83071643", "0.8217564", "0.8212342", "0.8212342", "0.8208706", "0.8185291", "0.8185291", "0.8185291", "0.8185291", "0.8185291", "0.8185291", "0.8185291", "0.8185291", "0.81841487", "0.8170303", "0.806876", "0.7832755", "0.7826334", "0.78097415", "0.779785", ...
0.0
-1
Renders the about page.
def about(): return render_template( 'about.html', title='About', year=datetime.now().year, message='about page.' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def about():\n return render_template('about.html', title='About')", "def about():\n\n return render_template('about_page.html', title='About')", "def about():\n\n\treturn render_template(\"about.html\")", "def on_about(self):\n render_about_window()", "def about():\r\n return render_te...
[ "0.85650355", "0.85034525", "0.84489655", "0.84044397", "0.82935613", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82680845", "0.8243358",...
0.8394454
4
The name to use for logging
def _log_name(): return os.path.splitext(os.path.basename(__file__))[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logger_name(self):\n return self.__class__.__name__", "def log_stream_name(self) -> str:\n ...", "def log_name(self) -> Optional[str]:\n return self._log_name", "def logPrefix(self):\n return self.__class__.__name__", "def log_group_name(self) -> str:\n ...", "def l...
[ "0.82156974", "0.7782969", "0.77613235", "0.7558219", "0.7540767", "0.74446285", "0.73443824", "0.7230502", "0.7157241", "0.7145923", "0.7061045", "0.7061045", "0.6993713", "0.6953699", "0.6931624", "0.6909159", "0.6909159", "0.68735456", "0.6843457", "0.6843457", "0.6843457"...
0.76744807
3
Compute path of file relative to this module.
def _sibling_path(name): here = os.path.dirname(os.path.join(os.getcwd(), __file__)) return os.path.normpath(os.path.join(here, name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_relative_pathname(self):\n return os.path.join(Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2],\n str(self.unique_id) + self.file_ext)", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def file_path(sel...
[ "0.7948632", "0.7901577", "0.78639334", "0.77487487", "0.77206314", "0.769371", "0.76692975", "0.7637351", "0.76340103", "0.75847465", "0.7584325", "0.75806475", "0.756804", "0.7539383", "0.74923253", "0.7433047", "0.742732", "0.7422048", "0.7402005", "0.73825824", "0.7368132...
0.0
-1
Determine if a sysfs_gpu_name file indicates an AMD device
def _is_amd(sysfs_gpu_name): with open(sysfs_gpu_name) as src: return src.read().strip() == 'amdgpu'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")", "def is_system_usable_block_device(pydev_device):\n if pydev_device.get(\"ID_BUS\") == \"usb\":\n # Skip USB devices\n return False\n if py...
[ "0.6442702", "0.6077788", "0.60640377", "0.60526884", "0.6037231", "0.6019427", "0.601595", "0.599092", "0.5918574", "0.5823835", "0.5786603", "0.5764074", "0.5732489", "0.5730832", "0.5723959", "0.5701175", "0.56283104", "0.5625314", "0.56229156", "0.56229156", "0.5601314", ...
0.83339846
0
Determine the gpu index given a sysfs_gpu_name
def _amd_index(sysfs_gpu_name): drop_prefix = sysfs_gpu_name.strip()[len(_SYSFS_PREFIX):] return drop_prefix.split('/')[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _next_device(self):\n if self._num_gpus == 0:\n return ''\n dev = '/gpu:%d' % self._cur_gpu\n if self._num_gpus > 1:\n self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)\n return dev", "def deviceid(gpu):\n\n # Return if this is already a torch device\n # pylint: dis...
[ "0.68445116", "0.6772102", "0.6700925", "0.6621827", "0.65894985", "0.6331592", "0.62258136", "0.62258136", "0.62258136", "0.61923295", "0.61809945", "0.6132849", "0.6131118", "0.61252695", "0.6123489", "0.6101626", "0.6023584", "0.6001534", "0.5931374", "0.59023625", "0.5886...
0.786728
0
Determines the path of the configuration file
def _cfg_path(argv): cfg_path = argv[1] if len(argv) > 1 else None _is_file = os.path.isfile if not cfg_path or not _is_file(cfg_path): if cfg_path: _info("no config at {}, trying the default location".format( cfg_path)) cfg_path = _DEFAULT_PATH if not _is_file(cfg_path): _info("no config at {}, exiting".format(cfg_path)) return None return cfg_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME", "def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath", "def configPath(self):\n return os.path.dirname(__file__)"...
[ "0.84382236", "0.83876586", "0.83710814", "0.8215269", "0.8205837", "0.8173699", "0.8055139", "0.7999764", "0.79920894", "0.792743", "0.78715116", "0.78524226", "0.7781477", "0.7710906", "0.76966274", "0.76633596", "0.7605815", "0.76037824", "0.75543994", "0.75123537", "0.746...
0.7623583
16
Configures logging logging_config.json should have been placed in the directory AUTOMINE_LOG_DIR, to which this process must have read and write access
def _configure_logger(): try: log_dir = os.environ['AUTOMINE_LOG_DIR'] log_name = _log_name() cfg_path = os.path.join(log_dir, 'logging_config.json') with open(cfg_path) as src: cfg = json.load(src) handlers = cfg.get('handlers') for handler in iter(handlers.values()): filename = handler.get('filename') if filename: filename = filename.replace('{{AUTOMINE_LOG_DIR}}', log_dir) filename = filename.replace('{{__name__}}', log_name) handler['filename'] = filename loggers = cfg.get('loggers') if '__name__' in loggers: loggers[log_name] = loggers.pop('__name__') # add logging to the console if env var is set log_to_console = 'AUTOMINE_LOG_TO_CONSOLE' in os.environ if log_to_console and 'console' in handlers: logger_handlers = loggers[log_name].get('handlers') if logger_handlers: logger_handlers.append('console') dictConfig(cfg) except Exception as err: # pylint: disable=broad-except logging.basicConfig() raise err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_logging():\n name_json = 'logging_config.json'\n path_json = os.path.join(os.path.dirname(__file__), name_json)\n with open(path_json, 'r') as f_json:\n dict_config = json.load(f_json)\n logging.config.dictConfig(dict_config)", "def setup_logging(save_dir, log_config='logger/logger_c...
[ "0.82155377", "0.75840616", "0.73912275", "0.7389693", "0.72844446", "0.7030196", "0.70190215", "0.7017492", "0.7016571", "0.6946439", "0.69230664", "0.68668836", "0.6850633", "0.6848561", "0.6812513", "0.6809024", "0.6786257", "0.6777338", "0.6761215", "0.6757546", "0.674431...
0.81726515
1
The command line entry point
def main(argv=None): if argv is None: argv = sys.argv try: _configure_logger() cfg_path = _cfg_path(argv) if not cfg_path: return 1 the_cfg = json.load(open(cfg_path)).get('amdgpu') if not isinstance(the_cfg, dict): raise ValueError("missing config in {}".format(cfg_path)) _info("loaded config from {0}".format(cfg_path)) perform_overclock(the_cfg) return 0 except ValueError: _LOG.error( "error using the config: %s, exiting", cfg_path, exc_info=True) return 1 except Exception: # pylint: disable=broad-except _LOG.error('could not perform overclock', exc_info=True) return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(args=None):", "def main(args=None):", "def main(args):", "def main(args):", "def main(args=None):\n pass", "def main():\n return", "def main():\n pass", "def main(self) -> None:\n pass", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def mai...
[ "0.82658285", "0.82658285", "0.8260238", "0.8260238", "0.8001488", "0.78304154", "0.77982444", "0.7783042", "0.7692148", "0.7692148", "0.7692148", "0.7692148", "0.76892644", "0.7674975", "0.7674975", "0.7674975", "0.76543075", "0.76094264", "0.7606937", "0.7606937", "0.760693...
0.0
-1
Endpoint to display individual item.
def read_item(item_name, catagory_name): item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name) return render_template('item.html', item=item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self, item_id):\n pass", "def showItem(category_item_id):\n return render_template('item.html', item=db.findItem(id=category_item_id))", "def get(self, item_id: int):\n\n try:\n\n controller = self.controller()\n schema = self.schema()\n ...
[ "0.7974844", "0.69679177", "0.69007516", "0.68516976", "0.68477976", "0.6822715", "0.6821025", "0.6692358", "0.6668874", "0.662971", "0.65989023", "0.65882397", "0.6572797", "0.6545979", "0.6537464", "0.652906", "0.6496628", "0.646886", "0.646886", "0.646886", "0.64111596", ...
0.56846815
75
Endpoint to display create item page.
def create_item_page(): catagories = [c.name for c in Catagory.fetch_all()] return render_template('add_item.html', catagories=catagories, values={})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 's...
[ "0.74668145", "0.71456575", "0.6886429", "0.6816857", "0.68146276", "0.6764137", "0.65232", "0.6477503", "0.6445127", "0.6436602", "0.6388984", "0.6381548", "0.634037", "0.6285156", "0.6273723", "0.624713", "0.62171143", "0.6188557", "0.6186302", "0.61858845", "0.61284363", ...
0.7436075
1
Post endpoint to create an item. If form is invalid will return create item page with errors displayed, otherwise create item and redirect to item page.
def create_item(): name = request.form['name'] catagory = request.form['catagory'] description = request.form['description'] errors = form_errors(request.form) if errors: catagories = [c.name for c in Catagory.fetch_all()] values = { 'name': name, 'catagory': catagory, 'description': description } return render_template( 'add_item.html', catagories=catagories, values=values, errors=errors ) Item.create(name, catagory_name=catagory, description=description) return redirect(url_for( 'read_item', catagory_name=catagory, item_name=name ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # ne...
[ "0.7512342", "0.74192584", "0.7168803", "0.7162308", "0.6992464", "0.68776226", "0.6828334", "0.67595434", "0.6719401", "0.67075944", "0.66212463", "0.66060036", "0.65833145", "0.65363026", "0.6523961", "0.6484986", "0.6414084", "0.6341097", "0.6312691", "0.6282003", "0.62772...
0.7602898
0
Endpoint to display update item page.
def update_item_page(item_name, catagory_name): item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name) catagories = [c.name for c in Catagory.fetch_all()] return render_template( 'edit_item.html', catagories=catagories, values={ 'name': item.name, 'catagory': item.catagory_name, 'description': item.description }, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def item_update(request):\n if request.method == 'POST':\n item_to_update = get_object_or_404(StockItem, pk=request.POST['id'])\n item_to_update.name = request.POST['name']\n item_to_update.count = int(request.POST['count'])\n item_to_update.date_of_expiration = request.POST['exp']\n...
[ "0.6676208", "0.64609563", "0.64174616", "0.63904625", "0.63857603", "0.6339873", "0.6314984", "0.62889534", "0.6269479", "0.6255692", "0.6253654", "0.624584", "0.62428105", "0.6238067", "0.62355477", "0.6230332", "0.62169313", "0.6211939", "0.62091535", "0.61984015", "0.6185...
0.675567
0
Post endpoint to update an item. If form is invalid will return create item page with errors displayed, otherwise update item and redirect to item page.
def update_item(item_name, catagory_name): try: item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name) except NoResultFound: abort(404) errors = form_errors(request.form) new_item_name = request.form.get('name') new_catagory_name = request.form.get('catagory') new_description = request.form.get('description') if errors: values = { 'name': new_item_name, 'catagory': new_catagory_name, 'description': new_description } catagories = [c.name for c in Catagory.fetch_all()] return render_template( 'add_item.html', catagories=catagories, values=values, errors=errors ) item.update( name=new_item_name, catagory_name=new_catagory_name, description=new_description ) return redirect(url_for( 'read_item', item_name=new_item_name, catagory_name=new_catagory_name ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to edit the item because\n # the item was created by a different user, send a 403\n ...
[ "0.72878253", "0.71200603", "0.67693913", "0.6749517", "0.6684326", "0.66837543", "0.66720814", "0.66635346", "0.66288364", "0.65429723", "0.6519543", "0.6503763", "0.6453958", "0.6416763", "0.63206726", "0.63055027", "0.62442046", "0.6199904", "0.6198838", "0.6180717", "0.60...
0.6661015
8
Endpoint to display confirm delete item page.
def delete_item_page(item_name, catagory_name): return render_template( 'delete_item.html', item_name=item_name, catagory_name=catagory_name )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_item_details(item_id):\n item = is_user_the_creator(item_id)\n item_name = item.Item.name\n if request.method == 'GET':\n return render_template('item_delete_confirm.html', item_name=item_name, item_id=item_id,\n login_session=login_session,\n ...
[ "0.7169478", "0.6802842", "0.67705566", "0.66972566", "0.6590073", "0.6553968", "0.65416104", "0.65388113", "0.6523968", "0.6497277", "0.6489816", "0.64742243", "0.6455277", "0.643404", "0.6403796", "0.6396889", "0.6391679", "0.63716215", "0.63338363", "0.63180286", "0.631778...
0.6615197
4
Post endpoint to delete item. Redirects to home.
def delete_item(item_name, catagory_name): try: item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name) except NoResultFound: abort(404) item.delete() return redirect(url_for('home'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_item(request):\n if request.json_body[u'type'] == u'post':\n if DBSession.query(Post).filter(Post.name==request.json_body[u'name']).delete() == 1:\n return {\"deletion_status\":\"success\"}\n import ipdb; impdb.set_trace()\n return {\"deletion_statu...
[ "0.7321518", "0.7296745", "0.7259978", "0.7202219", "0.70292133", "0.70238733", "0.70196456", "0.6984477", "0.69794494", "0.6974444", "0.6911067", "0.68968356", "0.6894365", "0.68761015", "0.6870689", "0.6862545", "0.6816911", "0.6768796", "0.6750122", "0.674466", "0.6742463"...
0.680767
17
Return dict containing form validation errors for create / update item.
def form_errors(form): errors = {} max_name_length = Item.name.property.columns[0].type.length if not form.get('name', None): errors['name'] = 'Please enter a name.' elif len(form['name']) > max_name_length: errors['name'] = ( 'Name must be less than %s characters.' % max_name_length ) if not Catagory.exists(form.get('catagory', None)): errors['catagory'] = 'Not a valid catagory.' if not form.get('description', None): errors['description'] = 'Please enter a description.' return errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_form_error(self):\n errors = {}\n if self._form_error:\n errors[\"base\"] = self._form_error\n self._form_error = None\n return errors", "def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items...
[ "0.7247966", "0.65767854", "0.65024495", "0.6369957", "0.62522954", "0.6153304", "0.6130591", "0.6103089", "0.6074817", "0.6054684", "0.5964537", "0.5951978", "0.594963", "0.59282666", "0.59004563", "0.58859175", "0.5877393", "0.5859856", "0.58356667", "0.5771657", "0.5703528...
0.7545022
0
This method splits each document in the batch into chunks wuth the maximal length of max_chunk_len
def __call__(self, docs_batch: List[str]) -> Tuple[List[List[str]], List[List[int]]]: text_batch_list = [] text_batch = [] nums_batch_list = [] nums_batch = [] count_texts = 0 text = "" curr_doc = 0 for n, doc in enumerate(docs_batch): sentences = sent_tokenize(doc) for sentence in sentences: if len(text) + len(sentence) < self.max_chunk_len and n == curr_doc: text += f"{sentence} " else: if count_texts < self.batch_size: text_batch.append(text.strip()) if n == curr_doc: nums_batch.append(n) else: nums_batch.append(n - 1) count_texts += 1 else: text_batch_list.append(text_batch) text_batch = [] nums_batch_list.append(nums_batch) nums_batch = [n] count_texts = 0 curr_doc = n text = f"{sentence} " if text: text_batch.append(text.strip()) text_batch_list.append(text_batch) nums_batch.append(len(docs_batch) - 1) nums_batch_list.append(nums_batch) return text_batch_list, nums_batch_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_split(self, batch_text, threads=8):\n pass", "def chunks(self, list_to_chunk, size):\n for i in range(0, len(list_to_chunk), size):\n yield list_to_chunk[i:i + size]", "def split_chunk(chunk, sizes, max_iter=1000, rng=None):\n assert len(chunk) == sum(sizes), f\"{len(chunk...
[ "0.67054933", "0.6599035", "0.6590152", "0.6497465", "0.6494132", "0.6458296", "0.6434048", "0.63691735", "0.6354726", "0.6328011", "0.6231897", "0.62225235", "0.61968756", "0.6193412", "0.61856794", "0.61689913", "0.6143832", "0.61328125", "0.61289346", "0.6128609", "0.61256...
0.0
-1
Chooses a BoTorch `Model` using the given data.
def choose_model_class( Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], task_features: List[int], fidelity_features: List[int], ) -> Type[Model]: if len(task_features) > 0: raise NotImplementedError("Currently do not support `task_features`!") if len(fidelity_features) > 1: raise NotImplementedError("Currently support only a single fidelity parameter!") # NOTE: We currently do not support `task_features`. This code block will only # be relevant once we support `task_features`. # if len(task_features) > 1: # raise NotImplementedError( # f"This model only supports 1 task feature (got {task_features})" # ) # elif len(task_features) == 1: # task_feature = task_features[0] # else: # task_feature = None task_feature = None # NOTE: In the current setup, `task_feature = None` always. if task_feature is None: Yvars_cat = torch.cat(Yvars).clamp_min_(MIN_OBSERVED_NOISE_LEVEL) is_nan = torch.isnan(Yvars_cat) any_nan_Yvar = torch.any(is_nan) all_nan_Yvar = torch.all(is_nan) if any_nan_Yvar and not all_nan_Yvar: raise ValueError( "Mix of known and unknown variances indicates valuation function " "errors. Variances should all be specified, or none should be." ) if len(fidelity_features or []) > 0: return SingleTaskMultiFidelityGP elif all_nan_Yvar: return SingleTaskGP return FixedNoiseGP # TODO: Replace ValueError with `ModelListGP`. # raise ValueError("Unexpected training data format. Cannot choose `Model`.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_model(model_name: str):\r\n global predictor, currently_selected_model\r\n predictor = FeatureExtractor(model_name)\r\n currently_selected_model = model_name", "def get_model():\n SUPPORTED_DATASETS = ('imagenet', 'cifar10', 'mnist')\n\n # ensure the dataset is supported\n dataset = ...
[ "0.66947436", "0.6254575", "0.6168464", "0.6149604", "0.61225873", "0.6105856", "0.6098028", "0.6089826", "0.6040291", "0.6034758", "0.60276425", "0.599368", "0.5964817", "0.58874965", "0.5885631", "0.58818895", "0.58405644", "0.58335435", "0.580409", "0.58008975", "0.5786825...
0.0
-1
r"""Chooses a BoTorch `MarginalLogLikelihood` class using the given `Model` class.
def choose_mll_class( model_class: Type[Model], state_dict: Optional[Dict[str, Tensor]] = None, refit: bool = True, ) -> Type[MarginalLogLikelihood]: # NOTE: We currently do not support `ModelListGP`. This code block will only # be relevant once we support `ModelListGP`. if (state_dict is None or refit) and issubclass(model_class, ModelListGP): return SumMarginalLogLikelihood return ExactMarginalLogLikelihood
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, likelihood, model):\n if not isinstance(likelihood, GaussianLikelihood):\n raise RuntimeError(\"Likelihood must be Gaussian for exact inference\")\n super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)", "def from_botorch(\n cls,\n model: M...
[ "0.6100431", "0.5624646", "0.55522686", "0.55522686", "0.536249", "0.53345406", "0.5269471", "0.52394426", "0.5220503", "0.51793855", "0.51639843", "0.508774", "0.50566566", "0.505593", "0.50103873", "0.50024384", "0.49973372", "0.4989311", "0.49551207", "0.49261236", "0.4925...
0.74493885
0
r"""Chooses a BoTorch `AcquisitionFunction` class.
def choose_botorch_acqf_class() -> Type[AcquisitionFunction]: # NOTE: In the future, this dispatch function could leverage any # of the attributes of `BoTorchModel` or kwargs passed to # `BoTorchModel.gen` to intelligently select acquisition function. return qNoisyExpectedImprovement
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, acquisition_functions):\n self.acquisition_functions = acquisition_functions", "def _optimise_acquisition(acq_fn, acq_optimiser, anc_data):\n return acq_optimiser(acq_fn, anc_data.max_evals)", "def _optimise_acquisition(acq_fn, acq_optimiser, anc_data):\n if anc_data.acq_opt_method ...
[ "0.6058467", "0.5935972", "0.58589655", "0.5788014", "0.548214", "0.5331167", "0.53201264", "0.53140235", "0.53060716", "0.5304556", "0.5121495", "0.5052555", "0.5037035", "0.50274396", "0.500004", "0.49999496", "0.49726632", "0.49604744", "0.49250162", "0.48792323", "0.48333...
0.73012865
0
Construct a `TrainingData` object based on sizes of Xs, Ys, and Yvars, and the type of model, for which the training data is intended.
def construct_training_data( Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], model_class: Type[Model] ) -> TrainingData: if not isclass(model_class): # pragma: no cover raise ValueError( f"Expected `Type[Model]`, got: {model_class} " f"(type: {type(model_class)})." ) if len(Xs) == len(Ys) == 1: # Just one outcome, can use single model. return TrainingData(X=Xs[0], Y=Ys[0], Yvar=Yvars[0]) elif issubclass(model_class, BatchedMultiOutputGPyTorchModel) and all( torch.equal(Xs[0], X) for X in Xs[1:] ): # All Xs are the same and model supports batched multioutput. return TrainingData( X=Xs[0], Y=torch.cat(Ys, dim=-1), Yvar=torch.cat(Yvars, dim=-1) ) elif model_class is ModelListGP: # pragma: no cover # TODO: This will be case for `ListSurrogate`. raise NotImplementedError("`ModelListGP` not yet supported.") raise ValueError(f"Unexpected training data format for {model_class}.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def _unpack_tr...
[ "0.62350506", "0.6225271", "0.6160196", "0.6157232", "0.6134622", "0.61098945", "0.6080254", "0.606203", "0.60147977", "0.60086304", "0.59555346", "0.5948243", "0.5919351", "0.59027153", "0.5900304", "0.58960056", "0.5879972", "0.58337307", "0.5829074", "0.5811363", "0.580975...
0.8095568
0
Validates that Xs, Ys, Yvars, and metric names all have equal lengths.
def validate_data_format( Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], metric_names: List[str] ) -> None: if len({len(Xs), len(Ys), len(Yvars), len(metric_names)}) > 1: raise ValueError( # pragma: no cover "Lengths of Xs, Ys, Yvars, and metric_names must match. Your " f"inputs have lengths {len(Xs)}, {len(Ys)}, {len(Yvars)}, and " f"{len(metric_names)}, respectively." )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_consistent_length(y_true: List[List[str]], y_pred: List[List[str]]):\n len_true = list(map(len, y_true))\n len_pred = list(map(len, y_pred))\n is_list = set(map(type, y_true)) | set(map(type, y_pred))\n\n if len(y_true) != len(y_pred) or len_true != len_pred:\n message = 'Found input v...
[ "0.6732428", "0.65989214", "0.65425104", "0.64392585", "0.6399168", "0.63487905", "0.6343038", "0.62423515", "0.61932313", "0.6176423", "0.6091993", "0.6081119", "0.60686547", "0.6042439", "0.603004", "0.60111535", "0.601055", "0.60092235", "0.59968686", "0.5972382", "0.59626...
0.8259363
0
Extract acquisition and optimizer options from `model_gen_options`.
def construct_acquisition_and_optimizer_options( acqf_options: TConfig, model_gen_options: Optional[TConfig] = None ) -> Tuple[TConfig, TConfig]: acq_options = acqf_options.copy() opt_options = {} if model_gen_options: acq_options.update( checked_cast(dict, model_gen_options.get(Keys.ACQF_KWARGS, {})) ) # TODO: Add this if all acq. functions accept the `subset_model` # kwarg or opt for kwarg filtering. # acq_options[SUBSET_MODEL] = model_gen_options.get(SUBSET_MODEL) opt_options = checked_cast( dict, model_gen_options.get(Keys.OPTIMIZER_KWARGS, {}) ).copy() return acq_options, opt_options
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_build_options(cls, opt: Opt):\n query_model = 'bert'\n document_model = 'bert'\n query_path = opt['model_file']\n document_path = opt['model_file']\n try:\n # determine if loading a RAG model\n loaded_opt = Opt.load(f\"{query_path}.opt\")\n ...
[ "0.6497855", "0.6085382", "0.57732934", "0.56216425", "0.55810773", "0.5563721", "0.54467785", "0.54459274", "0.54262084", "0.54096705", "0.53853124", "0.5346946", "0.5331329", "0.53296965", "0.53257495", "0.5312863", "0.5310399", "0.52858835", "0.52766055", "0.52655095", "0....
0.6829646
0
Update the state of the hash object.
def update(self, data): self._total_length += len(data) self._buffer += data # A digest calculated for 240 bytes or less of data will use # self._seed and self._secret (at least one of which is the # default) directly whereas the digest calulated for more than # 240 bytes will use only self._secret. However, if a # non-default seed was provided (and not discarded because both # a seed and a secret were mistakenly provided) then # self._secret must be redefined to a secret generated from # self._seed (but only for more than 240 bytes of input data). # # Because of this, update() does nothing but store the data # until more than 240 bytes have been added. Then, it redefines # self._secret (if self.seed != 0) before continuing. So as to # do this only during the first call to update() in which there # is sufficient data, self._acc is also intialized at that time, # and the process is skipped if self._acc is already initialized. if self._total_length <= 240: return if self._acc is None: # There is sufficient data that _update_hashlong() will be # used and this is the first call to update that ensures # this. So, do setup for _update_hashlong(). self._acc = [ self._P32_3, self._P64_1, self._P64_2, self._P64_3, self._P64_4, self._P32_2, self._P64_5, self._P32_1, ] self._last_stripe = b"" if self._seed != 0: self._secret = self._customsecret(self._seed) # _update_hashlong() will consume as much of self._buffer # as possible. self._update_hashlong()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n self._state = self._state", "def hash(self, hash):\n\n self._hash = hash", "def hash(self, hash):\n\n self._hash = hash", "def update(self):\n self.write_state(bytes([]))", "def update(self):\n self._state = 23", "def _update_state(self) -> None:\n ...
[ "0.6944641", "0.6727618", "0.6727618", "0.66924864", "0.6656745", "0.6560295", "0.65587795", "0.6553051", "0.65319693", "0.6484831", "0.6480291", "0.6452417", "0.6441518", "0.6440476", "0.64331573", "0.6417873", "0.6360725", "0.6358439", "0.6283338", "0.62017864", "0.6175439"...
0.0
-1
Return the hash digest as a 64bit unsigned integer. This is the typical output format of the `reference implementation`_.
def intdigest(self): if self._total_length <= 240: if self._total_length == 0: return self._len_0() elif self._total_length <= 3: return self._len_1to3() elif self._total_length <= 8: return self._len_4to8() elif self._total_length <= 16: return self._len_9to16() elif self._total_length <= 128: return self._len_17to128() elif self._total_length <= 240: return self._len_129to240() # self._update_hashlong() has consumed as much of self._buffer # as possible. self._finalize_hashlong() will complete the # hash process return self._finalize_hashlong()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash64bits(*args):\n # 64 bits hexdigest\n h = hashlib.sha1(bytes(repr(args), \"utf-8\")).hexdigest()[:16]\n # Convert to an integer and return\n return int(h, 16)", "def digest(self):\n # For discussion of big-endian vs little-endian for the hash\n # digest of XXHASH algorithms, se...
[ "0.76398647", "0.7091548", "0.6616426", "0.66112274", "0.66112274", "0.65616864", "0.6554457", "0.6453391", "0.63296676", "0.6313555", "0.63109", "0.6277919", "0.6270182", "0.6270182", "0.62685853", "0.62674755", "0.6248589", "0.624666", "0.622734", "0.61998284", "0.61998194"...
0.0
-1
Return the hash digest as a bytes object. This is the bigendian representation of the value returned by ``intdigest()`` and is equivalent to the output of the ``XXH64_canonicalFromHash()`` function in the `reference implementation`_ applied to the value returned by ``intdigest()``.
def digest(self): # For discussion of big-endian vs little-endian for the hash # digest of XXHASH algorithms, see # https://github.com/Cyan4973/xxHash/issues/45 return struct.pack(">Q", self.intdigest())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hexdigest(self):\n return self.hashObject.hexdigest()", "def digest(self):\n return self._hash", "def hash(self) -> bytes:", "def digest(self):\n return digest_tools.sha256_digest(self._payload.as_encoded_str())", "def hash(self):\n return Hash.dhash(bytes(self))", "def he...
[ "0.69930434", "0.69264513", "0.67882836", "0.6712462", "0.66841334", "0.66802466", "0.6673555", "0.6600667", "0.6553306", "0.6533125", "0.6499518", "0.6499283", "0.6481292", "0.6456057", "0.6456057", "0.64141536", "0.6401427", "0.6399107", "0.638602", "0.63651985", "0.6290785...
0.78258497
0
Return the hash digest as a string of hexidecimal digits. This is the value returned by ``digest()`` expressed as a printable hex string for easy display.
def hexdigest(self): # bytes.hex() is simpler, but not available For Python <= 3.4 return "".join("{0:0>2x}".format(b) for b in self.digest())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])", "def hexdigest(self):\n return \"\".join(\"%02x\" % ord(x)\n for x in MegaCrypto.a32_to_str(self.digest()))", "def hexdigest(self):\n return self.hashObject.hexdigest()", "de...
[ "0.8446957", "0.8241606", "0.8034233", "0.7728154", "0.7493876", "0.72516394", "0.7215683", "0.70375633", "0.6966864", "0.69554013", "0.6895401", "0.6893466", "0.6879853", "0.6794466", "0.6791125", "0.6765434", "0.67632526", "0.67488396", "0.6718266", "0.6668175", "0.6663794"...
0.8364801
1
Return a copy (clone) of the hash object.
def copy(self): cp = self.__class__() # create a new instance of the subclass # copy current state to the new instance cp._acc = self._acc cp._seed = self._seed cp._secret = self._secret cp._last_stripe = self._last_stripe cp._total_length = self._total_length cp._buffer = self._buffer return cp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone(self):\n memo = dict()\n c = self._clone(memo)\n c._clone_rip(memo)\n return c", "def clone(self):\n return self.copy()", "def clone(self):\n clone = super(LongObjectHashMap, self).clone()\n clone.clear()\n clone.initialize()\n for key in...
[ "0.76727754", "0.7490999", "0.7473199", "0.74517614", "0.74517614", "0.74517614", "0.74517614", "0.74502987", "0.7433837", "0.74067515", "0.73838836", "0.72835857", "0.72783124", "0.72249395", "0.72157496", "0.7209674", "0.71724945", "0.71661943", "0.71661943", "0.71661943", ...
0.0
-1
Prepare the batch export of tag definitions to GetResponse
def tag_export_batch(session, model_name, backend_id, domain=None, fields=None, delay=False, **kwargs): connector_env = get_environment(session, model_name, backend_id) # Get the exporter connector unit batch_exporter = connector_env.get_connector_unit(TagBatchExporter) # Start the batch export batch_exporter.batch_run(domain=domain, fields=fields, delay=delay, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_response_objects(self) -> list[JsonDict]:\n responses = []\n for feat_type, feat_name, _ in self.features:\n if feat_type.is_array():\n feat_name = cast(str, feat_name) # can only be string since it's an array type\n responses.append(SentinelHubRe...
[ "0.5429554", "0.5371432", "0.52643687", "0.5249134", "0.5248237", "0.52403224", "0.52280647", "0.52162", "0.51670784", "0.5164601", "0.5159841", "0.51428187", "0.5142124", "0.5130368", "0.51142645", "0.5101176", "0.50706816", "0.5053283", "0.504867", "0.5047648", "0.50421995"...
0.0
-1
Check if the specified instance matches the service's model.
def _isinstance(self, instance, raise_error=True): if isinstance(instance, self.__model__): return True elif raise_error: raise ValueError('{} is not of type {}.'.format( instance, self.__model__, )) else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkModel(self, model):\n # TODO", "def test_valid_model(self):\n model_cls = ModelContainer(APP_LABEL, TestModel2._meta.db_table).model_cls\n self.assertTrue(model_cls.__class__.__name__ is models.Model.__class__.__name__)", "def have_this_instance(self, instance):\n for i in ...
[ "0.6658003", "0.6365422", "0.63516694", "0.59845364", "0.59756815", "0.59461135", "0.5938544", "0.59184885", "0.5915191", "0.59148186", "0.5906111", "0.59015507", "0.5884941", "0.585603", "0.5847129", "0.58132994", "0.57774276", "0.57701457", "0.5730501", "0.57222885", "0.572...
0.650044
1
Return a generator containing all instances of the model.
def all(self): return self.__model__.query.all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_model_instances(self) -> Iterator['panda_core_data.model.Model']:\n for current_type in self.all_models:\n for current_instance in current_type.all_instances:\n yield current_instance", "def fetchall(self):\n rows = self.cursor.fetchall()\n\n if self.model.s...
[ "0.8044509", "0.7411966", "0.734941", "0.7186237", "0.71022946", "0.70422894", "0.68495035", "0.66786873", "0.66777647", "0.6615131", "0.653405", "0.65155786", "0.65155786", "0.64561963", "0.6453592", "0.64051664", "0.6363701", "0.6355945", "0.6348062", "0.6272981", "0.624392...
0.5920896
90
Commit the instance to the database and return it.
def save(self, instance, commit=True): self._isinstance(instance) db.session.add(instance) if commit: db.session.commit() return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commit(self):\n if not getattr(self, '_id', None):\n return self._create()\n res = self._update()\n self._dirty = False\n return res", "def commit(self):\n try:\n DBSESSION.add(self)\n DBSESSION.commit()\n return self\n exc...
[ "0.8033429", "0.7885448", "0.7634681", "0.73922426", "0.7373556", "0.7361513", "0.7327505", "0.7292507", "0.72648495", "0.71835274", "0.7170746", "0.71401006", "0.71388215", "0.71369076", "0.71341854", "0.7112524", "0.70495015", "0.70453817", "0.700578", "0.6989802", "0.69865...
0.7144451
11
Reads the spectrial thermal conductivity information
def read_spectral_k(filename="tc_dos_l.dat"): # column headers for the data #tcdosl_labels = [ # "wavelength", # "k_xx_raw","k_xx_smooth", # "k_yy_raw","k_yy_smooth", # "k_zz_raw","k_zz_smooth"] tcdosl_labels = [ "wavelength", "k_xx_raw","k_yy_raw","k_zz_raw", "k_xx_smooth","k_yy_smooth","k_zz_smooth"] def subselect_table_block(i_start,lines): i = i_start + 1 table = [] while(lines[i].strip() != ""): args = lines[i].split() args = [arg.strip() for arg in args] args = [float(arg) for arg in args] table.append(args) i += 1 return np.array(table) line = None # initialize with open(filename,'r') as f: lines = f.readlines() lines = [s.strip() for s in lines] temperatures = [] tcdosl_dict = OrderedDict() for il,line in enumerate(lines): if line.startswith('# Temp:'): args = line.split(':') T = int(float(args[1].strip())) temperatures.append(T) tcdosl_dict[T] = subselect_table_block(il,lines) tcdosl_df_dict = OrderedDict() for temp in temperatures: tcdosl_df_dict[temp] = pd.DataFrame( copy.deepcopy(tcdosl_dict[temp]), columns=list(tcdosl_labels)) return {k:v.copy() for k,v in tcdosl_df_dict.items()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_chip_temperature(self):\n self.check_validity()\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_GET_CHIP_TEMPERATURE, (), '', 10, 'h')", "def ccdtemp(n=2):\n temp = camera.GetTemperature()\n camera.status.update()\n mesg, f1, f2, f3, f4 = camera.GetTemp...
[ "0.61936766", "0.6150327", "0.60563195", "0.60492754", "0.59827816", "0.5981612", "0.5974009", "0.5960181", "0.5955466", "0.5945766", "0.5935459", "0.5898904", "0.58918184", "0.58871585", "0.58717936", "0.58078927", "0.5802359", "0.5800536", "0.5767256", "0.5763488", "0.57244...
0.6091845
2
Verifies simple ordering. IE '1' < '2' < '10' < '11' < '20' < '21'
def test_sort(): data = ["filename_{}.py".format(i) for i in range(200)] temp = data[:] random.shuffle(temp) assert data == sort(temp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sorting():\n string = [\"A\", \"B\", \"C\"]\n sorted_string = bubble_sort(string)\n for small, large in zip(sorted_string[:-1], sorted_string[1:]):\n assert small <= large\n\n negative_numbers = [-3, -5, -1, -99, -34, -33]\n sorted_negative_numbers = bubble_sort(negative_numbers)\n ...
[ "0.66505706", "0.6519151", "0.64865875", "0.6440262", "0.6426011", "0.6351435", "0.63446116", "0.6320897", "0.6317372", "0.6254106", "0.6245488", "0.6240697", "0.62373954", "0.62215376", "0.62139773", "0.6196602", "0.6195784", "0.6189653", "0.6173531", "0.61667407", "0.616407...
0.0
-1
Ensures proper order is preserved with multiple formats
def test_multi_template(): data = [] data.extend(["{}_data.json".format(i) for i in range(50)]) data.extend(["{}_log.csv".format(i) for i in range(50)]) data.extend(["filename_{}.py".format(i) for i in range(50)]) data.extend(["stuff_{}.py".format(i) for i in range(50)]) temp = data[:] random.shuffle(temp) assert data == sort(temp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_valid_fmt\n typlist = self._typlist\n isstrvar = self._isstrvar\n default_fmts = self._default_fmts\n \n for i, fmt in enumerate(self._fmtlist):\n ...
[ "0.59663427", "0.5793017", "0.5785299", "0.57808274", "0.5741465", "0.5710367", "0.5705475", "0.5698985", "0.55878264", "0.5578431", "0.5562306", "0.55582625", "0.55492294", "0.55001897", "0.5494213", "0.5479568", "0.5436687", "0.5435134", "0.53975886", "0.53861207", "0.53597...
0.0
-1
Converts the provided integer 'n' into a valid insertion point in the string 's', ie the current index locations or at the end
def gen_index_via_mod(s, n): if len(s) == 0: return 0 return n % (len(s) + 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_to_end(s, n):\n first=s[0:n]\n return s[n:] + first", "def string(self,pos_0,pos_1,n):\r\n n=int(n)\r\n if pos_0 <10:\r\n pos_0=\"00\"+str(pos_0)\r\n elif pos_0<100:\r\n pos_0=\"0\"+str(pos_0)\r\n\r\n if n <10:\r\n n=\"0\"+str((n))\r\n ...
[ "0.63267386", "0.6178822", "0.61630845", "0.6155788", "0.6115236", "0.6103694", "0.6012176", "0.6011193", "0.594181", "0.593987", "0.5923727", "0.5884271", "0.580508", "0.5737656", "0.5725076", "0.5623249", "0.5594806", "0.55564487", "0.5525483", "0.55181396", "0.5496168", ...
0.6202475
1
Make sure we don't insert in adjacent locations, otherwise the numbers will join together and our created ordering will be invalid, failing test.
def remove_adjacent_nums(n): output = [] for e in n: if len(output) == 0 or output[-1][0] <= e[0] - 2: output.append(e) return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ordering(self):\r\n def verify_order(source_usage_key, parent_usage_key, source_position=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key)\r\n parent = self.get_item_from_modulestore(parent_usage_key)\r\n children = parent.children\r\n ...
[ "0.6282855", "0.6281357", "0.6248567", "0.59403217", "0.59152997", "0.5827504", "0.5776358", "0.5738565", "0.5712663", "0.5696795", "0.5676538", "0.5648015", "0.562953", "0.561613", "0.56155276", "0.55678797", "0.5497624", "0.54904354", "0.54486006", "0.54314625", "0.5423515"...
0.0
-1
Gets the number of announcements on the server
def get(self): return {'status': 'success', 'count': Announcement.query.count()}, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetCount(self):\n return self._server.get_count()", "def count(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'count')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def count_urls(self):\n ...
[ "0.71485007", "0.6948312", "0.67301506", "0.6616612", "0.66085494", "0.6601228", "0.6522103", "0.6497675", "0.64732087", "0.64507085", "0.6435281", "0.6422948", "0.6408664", "0.64079416", "0.64079416", "0.6383736", "0.6378424", "0.6348932", "0.6346476", "0.63463545", "0.63441...
0.6914508
2
Gets all announcements on the server
def get(self): announcements = Announcement.query.all() announcements = announcements_schema.dump(announcements) if not announcements: return {'status': 'success', 'announcements': announcements}, 206 # Partial Content Served return {'status': 'success', 'announcements': announcements}, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def getAnnouncements(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ContentValidator.getAnnouncements()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getAnnouncements\"],...
[ "0.67929053", "0.6228621", "0.61567163", "0.6098976", "0.5930889", "0.59122926", "0.58568746", "0.5851314", "0.58407116", "0.5822206", "0.5804673", "0.5770579", "0.57031876", "0.56998605", "0.5559147", "0.5553164", "0.5529305", "0.55254424", "0.55103004", "0.5502346", "0.5491...
0.6474763
1
delete a announcement by ID
def delete(self, announcementID): announcement = Announcement.query.filter_by(announcementID=announcementID) if not announcement.first(): return {'status': 'fail', 'message': 'No announcement with ID ' + str(announcementID) + ' exists'}, 404 announcement.delete() db.session.commit() return {'status': 'sucess', 'message': 'Announcement Deleted'}, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, _id):", "def delete(self, id):\n raise NotImplementedError", "def delete(self,id):\r\n return delete(id=id)", "def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')", "def delete(self, id):\n return delete_msg(id)", "def delete(self, id):\n...
[ "0.7832141", "0.7315442", "0.718373", "0.71590054", "0.715206", "0.7093981", "0.7008346", "0.6998064", "0.6946827", "0.6931493", "0.6922772", "0.68966997", "0.6869891", "0.6861729", "0.6804666", "0.6804666", "0.6804666", "0.6804666", "0.6804666", "0.6717501", "0.6696411", "...
0.79569536
0
Function that converts category name to Python module name Eg. rwgeneric to RwGenericYang
def get_module_name_from_log_category(log_category): words = log_category.split('-') words.append('yang') return ''.join(word.capitalize() for word in words)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_module_name(layer_name):\n modules = layer_name.split('.')\n try:\n idx = modules.index('module')\n except ValueError:\n return layer_name\n del modules[idx]\n return '.'.join(modules)", "def module_name(self):\n return \"py{0:s}\".format(self.library_name[3:])", "...
[ "0.6348545", "0.62357664", "0.6161529", "0.6005554", "0.59594476", "0.5909393", "0.58015877", "0.576444", "0.5749584", "0.5743478", "0.57170683", "0.57096314", "0.5701094", "0.56758934", "0.5675464", "0.5667408", "0.56373435", "0.5630453", "0.55773044", "0.5576993", "0.556977...
0.7726592
0
Create an instance of RwLogger
def __init__(self, category=None, log_hdl=None, file_name=None):
    """Create an RwLogger logging handler.

    :param category: optional log category name; must be a string when given.
    :param log_hdl: optional existing RwLog context; a new one is created
        from *file_name* when omitted.
    :param file_name: source file name recorded in the log context; when
        omitted it is derived from the caller's stack frame.
    """
    logging.Handler.__init__(self)
    """ Set the default formatter to include a rwlog marker so we know the message are being sent to rwlog."""
    self.setFormatter("(rwlog)" + logging.BASIC_FORMAT)
    # No explicit file name: derive it from the calling frame so log
    # records are attributed to the caller's source file.
    if file_name is None:
        frame = get_frame()
        file_name = get_caller_filename(frame)
    if category is not None:
        if not isinstance(category, six.string_types):
            raise TypeError("Category should be a string")
        # NOTE(review): presumably `category` is a property whose setter
        # applies the category, since set_category('rw-generic') is also
        # called unconditionally below — confirm against the class body.
        self.category = category
    # GBoxed types don't accept constructors will arguments
    # RwLog.Ctx(file_name) will throw an error, so call
    # new directly
    if not log_hdl:
        log_hdl = RwLog.Ctx.new(file_name)
    self._log_hdl = log_hdl
    # Default category until a caller overrides it.
    self.set_category('rw-generic')
    self._group_id = None
    self._rwlogd_inited = False
    # Map the rwlogd shared-memory filter file read-only so severity
    # filter updates can be observed without IPC.
    shm_filename = self._log_hdl.get_shm_filter_name()
    self._shm_filename = os.path.join('/dev/shm',shm_filename)
    try:
        self._shm_fd = open(self._shm_filename,'rb')
        self._shm_data=mmap.mmap(self._shm_fd.fileno(),length=0,flags=mmap.MAP_SHARED,prot=mmap.PROT_READ)
    except Exception as e:
        # Best effort: log and continue without the shm filter mapping.
        logger.error("Failed to open shm file: %s with exception %s",self._shm_filename,repr(e))
        print("Failed to open shm file: %s with exception %s",self._shm_filename,repr(e))
    self._log_serial_no = 0
    self._log_severity = 7  # Default sev is debug
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(...
[ "0.68505394", "0.6639985", "0.66274846", "0.65573317", "0.65444726", "0.65337306", "0.6491986", "0.648222", "0.64741373", "0.64380926", "0.64128065", "0.6381014", "0.6374408", "0.6352451", "0.6310848", "0.630257", "0.62926865", "0.6291359", "0.6255781", "0.62482107", "0.62347...
0.0
-1
Set Log category name to be used.
def set_category(self, category_name):
    """Set the log category used by this handler.

    Imports the generated ``gi.repository`` yang module for the category
    and verifies it defines every notification class required by
    ``RwLogger.level_event_cls_map``; only then is the category applied.
    On any failure the current category is left unchanged.
    """
    try:
        module_name = get_module_name_from_log_category(category_name)
        log_yang_module = importlib.import_module('gi.repository.' + module_name)
        if not log_yang_module:
            logger.error("Module %s is not found to be added as log category for %s", module_name, category_name)
            print("Module %s is not found to be added as log category for %s", module_name, category_name)
            return
        # Every severity level must map to a notification class on the module.
        for level in RwLogger.level_event_cls_map.values():
            if not hasattr(log_yang_module, level):
                logger.error("Module %s does not have required log notification for %s", module_name, level)
                print("Module %s does not have required log notification for %s", module_name, level)
                return
        # All checks passed: commit the new module and category name.
        self._log_yang_module = log_yang_module
        self._log_category_name = category_name
    except Exception as e:
        logger.exception("Caught error %s when trying to set log category (%s)",repr(e), category_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rider_category_name(self, rider_category_name):\n\n self._rider_category_name = rider_category_name", "def category(self, category: str):\n\n self._category = category", "def set_scribe_category(category):\r\n LogOptions._SCRIBE_CATEGORY = category", "def set_category(self, category):\n\...
[ "0.71033996", "0.656124", "0.6451363", "0.64011025", "0.63785875", "0.6319959", "0.6303118", "0.6279708", "0.6278118", "0.61796457", "0.608429", "0.608429", "0.608429", "0.608429", "0.608429", "0.60362625", "0.6006342", "0.59686434", "0.5840249", "0.57967573", "0.57849914", ...
0.7935372
0
Tests whether ``TextInputStyle`` instance names are all strings.
def test__TextInputStyle__name():
    """Every registered ``TextInputStyle`` instance must expose a ``str`` name."""
    for text_input_style in TextInputStyle.INSTANCES.values():
        vampytest.assert_instance(text_input_style.name, str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isStringStyle(self, style):\n return style in [QsciLexerJava.DoubleQuotedString,\n QsciLexerJava.SingleQuotedString,\n QsciLexerJava.UnclosedString,\n QsciLexerJava.VerbatimString]", "def isStringStyle(self, style):\n retur...
[ "0.7289942", "0.72890097", "0.6223571", "0.6214723", "0.6137764", "0.5913751", "0.584948", "0.5819439", "0.5805448", "0.5728817", "0.5721202", "0.5700193", "0.5695489", "0.5691739", "0.5658013", "0.56440264", "0.56350374", "0.5633974", "0.55882585", "0.5547582", "0.5522903", ...
0.68636096
2
Tests whether ``TextInputStyle`` instance values are all the expected value type.
def test__TextInputStyle__value():
    """Every registered ``TextInputStyle`` instance must carry a value of the declared VALUE_TYPE."""
    for text_input_style in TextInputStyle.INSTANCES.values():
        vampytest.assert_instance(text_input_style.value, TextInputStyle.VALUE_TYPE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test__TextInputStyle__name():\n for instance in TextInputStyle.INSTANCES.values():\n vampytest.assert_instance(instance.name, str)", "def _check_value_type(self, value):\n if value is not None and self.value_type is not None:\n valid = isinstance(value, self.value_type)\n ...
[ "0.61332947", "0.59603673", "0.5806856", "0.5770944", "0.57419115", "0.5711008", "0.5696463", "0.567288", "0.5661682", "0.5579446", "0.55630475", "0.5551327", "0.55341244", "0.5487145", "0.54515827", "0.5438341", "0.54083705", "0.54033923", "0.5351363", "0.5323914", "0.531121...
0.741814
0
No real implementation necessary. Only for heapq.
def __lt__(self, other):
    """Arbitrary constant ordering.

    Instances only need to be *comparable* so they can be stored in
    heapq-managed containers; no meaningful ordering is defined.
    """
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heapify(x):\n pass", "def test_pop_decreases_size(sample_priorityq):\n for i in range(5):\n sample_priorityq.insert([i, i + 3])\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 4\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 3\n sample_pr...
[ "0.73582697", "0.7054749", "0.6949152", "0.6781712", "0.67741895", "0.6711578", "0.6673364", "0.6665171", "0.66525424", "0.66385454", "0.65639687", "0.6450138", "0.64492726", "0.6448973", "0.6430282", "0.6409417", "0.639338", "0.639338", "0.639338", "0.6392037", "0.63643914",...
0.0
-1
Tests that example.com was in the dashboard.
def test_link_list(self):
    """The dashboard page renders successfully and mentions example.com."""
    dashboard = self.client.get('/tests/dashboard/')
    self.assertEqual(dashboard.status_code, 200)
    self.assertContains(dashboard, "example.com")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dashboard_page(self):\r\n\r\n result = self.client.get(\"/dashboard\", follow_redirects = True)\r\n self.assertNotIn(b\"Family Ties - Dashboard\", result.data)", "def test_dashboard_is_up(dashboard_address):\n response = requests.get(f\"{dashboard_address}/health\")\n assert response...
[ "0.7439744", "0.71425265", "0.6988231", "0.69753", "0.6952687", "0.69291663", "0.69152224", "0.69117343", "0.68734396", "0.68194866", "0.67331254", "0.673261", "0.6697194", "0.66839606", "0.66659987", "0.66591775", "0.6643598", "0.66313016", "0.6605587", "0.65915424", "0.6585...
0.7536914
0
Tests that the admin list found the User and Group admins
def test_admin_list(self):
    """The dashboard lists links to the User and Group admin pages."""
    page = self.client.get('/tests/dashboard/')
    self.assertEqual(page.status_code, 200)
    self.assertContains(page, '<a href="/admin/auth/group/">Group</a>', html=True)
    self.assertContains(page, '<a href="/admin/auth/user/">User</a>', html=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin_calendar_user_admin_list(self):\n response = self.client.get(\"/admin/auth/calendaruser/\")\n self.assertEqual(response.status_code, 200)", "def test_cannot_remove_all_admins(self):\n r = self.app.get('/admin/groups/')\n admin_holder = r.html.find(\n 'table',...
[ "0.7459246", "0.7412557", "0.74001384", "0.7367604", "0.7157066", "0.70924866", "0.70496404", "0.70242584", "0.70158803", "0.7010552", "0.69874895", "0.696997", "0.69406426", "0.6914392", "0.6912082", "0.6910139", "0.69032836", "0.68800515", "0.68568987", "0.6850447", "0.6785...
0.79272
0
Tests that the testuser was found.
def test_user_list(self):
    """The dashboard shows the test user's username and email address."""
    page = self.client.get('/tests/dashboard/')
    self.assertEqual(page.status_code, 200)
    self.assertContains(page, 'testuser', html=True)
    self.assertContains(page, 'none@nowhere.none', html=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user(self):\n return True", "def test_contains_user(self):\n print('(' + self.test_contains_user.__name__+')',\n self.test_contains_user.__doc__)\n self.assertTrue(self.connection.contains_user(PATIENT_USERNAME))\n self.assertTrue(self.connection.contains_user(DO...
[ "0.76748985", "0.7660696", "0.7656335", "0.75119334", "0.7449909", "0.73115385", "0.72937554", "0.72691184", "0.72514313", "0.7239464", "0.7218272", "0.71586215", "0.7152206", "0.71307015", "0.71049184", "0.7088999", "0.70759594", "0.7050742", "0.70313215", "0.7025326", "0.70...
0.6536369
86
Initialize a new FullyConnectedNet.
def __init__(self, hidden_dims, input_dim=3 * 32 * 32, num_classes=10,
             dropout=0, use_batchnorm=False, reg=0.0,
             weight_scale=1e-2, dtype=np.float32, seed=None):
    """Initialize a new FullyConnectedNet.

    :param hidden_dims: list of hidden layer sizes (must be a ``list``).
    :param input_dim: flattened input dimensionality.
    :param num_classes: number of output classes.
    :param dropout: dropout strength; > 0 enables dropout flags.
    :param use_batchnorm: whether batch normalization is requested.
    :param reg: L2 regularization strength.
    :param weight_scale: std-dev of the Gaussian weight initialization.
    :param dtype: numpy dtype all parameters are cast to.
    :param seed: unused here — NOTE(review): presumably intended for a
        dropout RNG seed; confirm against the rest of the class.
    """
    self.use_batchnorm = use_batchnorm
    self.use_dropout = dropout > 0
    self.reg = reg
    self.num_layers = 1 + len(hidden_dims)
    self.dtype = dtype
    self.params = {}

    if type(hidden_dims) != list:
        raise ValueError('hidden_dim has to be a list')

    # L = number of weight layers; dims chains input -> hiddens -> output.
    self.L = len(hidden_dims) + 1
    self.N = input_dim
    self.C = num_classes
    dims = [self.N] + hidden_dims + [self.C]
    # W{i} is (dims[i-1], dims[i]) Gaussian-initialized; b{i} is zeros.
    Ws = {'W' + str(i + 1): weight_scale * np.random.randn(dims[i], dims[i + 1]) for i in range(len(dims) - 1)}
    b = {'b' + str(i + 1): np.zeros(dims[i + 1]) for i in range(len(dims) - 1)}
    self.params.update(b)
    self.params.update(Ws)

    # When using dropout we need to pass a dropout_param dictionary to each
    # dropout layer so that the layer knows the dropout probability and the mode
    # (train / test). You can pass the same dropout_param to each dropout layer.

    # Cast all parameters to the correct datatype
    # (Python 2 dict API: iteritems)
    for k, v in self.params.iteritems():
        self.params[k] = v.astype(dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialise_network(self):\n raise NotImplementedError", "def initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n ...
[ "0.69023705", "0.6850007", "0.6721665", "0.65706", "0.6532473", "0.64673805", "0.64505595", "0.64458954", "0.6434368", "0.64332235", "0.64150655", "0.6372491", "0.636863", "0.6341182", "0.6330495", "0.63025963", "0.6276662", "0.62390316", "0.6235152", "0.61963755", "0.6190024...
0.0
-1
Compute loss and gradient for the fullyconnected net.
def loss(self, X, y=None):
    """Compute loss and gradient for the fully-connected net.

    :param X: input minibatch; flattened to (N, D) before the first layer.
    :param y: labels. When ``None`` the method runs in test mode and
        returns class scores only; otherwise it returns ``(loss, grads)``.
    """
    X = X.astype(self.dtype)
    mode = 'test' if y is None else 'train'

    # All intermediate activations and caches live in one dict, keyed by
    # layer index: h0 is the flattened input, h{L} the final scores.
    hidden = {}
    hidden['h0'] = X.reshape(X.shape[0], np.prod(X.shape[1:]))

    # ---- Forward pass ----
    for i in range(self.L):
        idx = i + 1
        w = self.params['W' + str(idx)]
        b = self.params['b' + str(idx)]
        h = hidden['h' + str(idx - 1)]

        # Last layer is plain affine (no ReLU); earlier layers are affine+ReLU.
        if idx == self.L:
            h, cache_h = affine_forward(h, w, b)
            hidden['h' + str(idx)] = h
            hidden['cache_h' + str(idx)] = cache_h
        else:
            h, cache_h = affine_relu_forward(h, w, b)
            hidden['h' + str(idx)] = h
            hidden['cache_h' + str(idx)] = cache_h

    scores = hidden['h' + str(self.L)]

    # Test mode: scores only, no loss/gradients.
    if mode == 'test':
        return scores

    # ---- Loss: softmax data loss plus L2 regularization over all W ----
    data_loss, dscores = softmax_loss(scores, y)
    reg_loss = 0
    for w in [self.params[f] for f in self.params.keys() if f[0] == 'W']:
        reg_loss += 0.5 * self.reg * np.sum(w * w)
    loss = data_loss + reg_loss

    # ---- Backward pass, walking the layers in reverse ----
    hidden['dh' + str(self.L)] = dscores
    for i in range(self.L)[::-1]:
        idx = i + 1
        dh = hidden['dh' + str(idx)]
        h_cache = hidden['cache_h' + str(idx)]
        if idx == self.L:
            dh, dw, db = affine_backward(dh, h_cache)
            hidden['dh' + str(idx - 1)] = dh
            hidden['dW' + str(idx)] = dw
            hidden['db' + str(idx)] = db
        else:
            dh, dw, db = affine_relu_backward(dh, h_cache)
            hidden['dh' + str(idx - 1)] = dh
            hidden['dW' + str(idx)] = dw
            hidden['db' + str(idx)] = db

    # W gradients get the regularization term added; strip the leading 'd'
    # from the key to recover the parameter name.
    list_dw = {key[1:]: val + self.reg * self.params[key[1:]] for key, val in hidden.iteritems() if key[:2] == 'dW'}
    # b gradients
    list_db = {key[1:]: val for key, val in hidden.iteritems() if key[:2] == 'db'}
    # gamma gradients (empty unless batchnorm populated them)
    list_dgamma = {key[1:]: val for key, val in hidden.iteritems() if key[:6] == 'dgamma'}
    # beta gradients (empty unless batchnorm populated them)
    list_dbeta = {key[1:]: val for key, val in hidden.iteritems() if key[:5] == 'dbeta'}

    grads = {}
    grads.update(list_dw)
    grads.update(list_db)
    grads.update(list_dgamma)
    grads.update(list_dbeta)
    return loss, grads
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W....
[ "0.7155436", "0.6939661", "0.685114", "0.68466926", "0.68327856", "0.67759883", "0.6716115", "0.66500854", "0.662701", "0.66258705", "0.660631", "0.65530854", "0.65456945", "0.6514191", "0.6513278", "0.6499993", "0.6499993", "0.6494454", "0.6474171", "0.6471904", "0.64706767"...
0.6169516
61
n = 2 1x2 0 1 n = 3 2x2 0 1 2 x n = 4 2x2 0 1 2 3 n = 5 2x3 0 1 2 3 4 x n = 6 2x3 n=7 3x3
def filled_grid(n):
    """Return the smallest ``(rows, cols)`` grid that holds ``n`` items.

    Starting from a 1x1 grid, the grid is grown one dimension at a time —
    first a column, then a row, alternating — until ``rows * cols >= n``.
    E.g. n=2 -> (1, 2), n=5 -> (2, 3), n=7 -> (3, 3).
    """
    rows = cols = 1
    grow_rows = False
    while rows * cols < n:
        if grow_rows:
            rows += 1
        else:
            cols += 1
        grow_rows = not grow_rows
    return rows, cols
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sw(n):\n return 4*n*n + 2*n + 1", "def nw(n):\n return 4*n*n + 1", "def ne(n):\n return 4*n*n - 2*n + 1", "def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1", "def heptagonal(n: int) -> int:\n return int(n * (5 * n - 3) / 2)", "def prob...
[ "0.73914665", "0.72387326", "0.6959924", "0.6865498", "0.67776674", "0.67680246", "0.6764694", "0.6735738", "0.66464823", "0.6636954", "0.6585204", "0.6544291", "0.65394866", "0.65350693", "0.6500963", "0.64812386", "0.6394155", "0.63908386", "0.6298539", "0.6288296", "0.6287...
0.621542
35
A convenience function to get planet position from spice
def planetPositionVelocity(planetName, time, ephemerisPath = '/supportData/EphemerisData/pck00010.tpc', observer = 'SSB', frame = 'J2000'):
    """Convenience wrapper returning a planet's position and velocity from SPICE.

    Loads the de430 ephemeris, the leap-second kernel, and the kernel at
    *ephemerisPath* (relative to ``bskPath``), reads the state of
    *planetName* at *time* relative to *observer* in *frame*, then unloads
    the extra kernel again.

    :returns: ``(position, velocity)`` converted from km to [m] and [m/s].
    """
    pyswice.furnsh_c(bskPath + '/supportData/EphemerisData/de430.bsp')
    pyswice.furnsh_c(bskPath + '/supportData/EphemerisData/naif0012.tls')  #load leap seconds
    pyswice.furnsh_c(bskPath + ephemerisPath)
    positionVelocity = pyswice.spkRead(planetName, time, frame, observer)
    # spkRead returns km / km/s; scale to SI units.
    position = positionVelocity[0:3] * 1000
    velocity = positionVelocity[3:6] * 1000
    # NOTE(review): only the ephemerisPath kernel is unloaded; de430.bsp and
    # naif0012.tls remain loaded — confirm this is intentional.
    pyswice.unload_c(bskPath + ephemerisPath)
    return position, velocity  # [m], [m/s]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_planet(coords):\n r_theta = output_coord_to_r_theta(coords)\n input_coords = r_theta_to_input_coords(r_theta)\n return input_coords", "def get_position(self, t0):\n my_pos_x=np.random.uniform(-20, 20)\n my_pos_y=np.random.uniform(-20, 20)\n r=np.array([my_pos_x, my_pos_y])\n...
[ "0.6685541", "0.63394314", "0.6268292", "0.6207987", "0.6067741", "0.60477245", "0.60084856", "0.59766847", "0.59688926", "0.5960511", "0.5945551", "0.5903077", "0.5866284", "0.5810986", "0.57815737", "0.577838", "0.5764995", "0.5735745", "0.5732618", "0.5730515", "0.57264274...
0.61307
4
Backup the git refs.
def backup_ref(self):
    """Back up the ref being changed.

    Records the old SHA1 of the ref under
    ``refs/backups/<type>-<name>-<timestamp>`` via ``git update-ref`` so
    the previous state can be recovered.
    """
    # Back ourselves up!
    backup_ref = "refs/backups/{0}-{1}-{2}".format(self.ref_type, self.ref_name, int(time.time()))
    command = ("git", "update-ref", backup_ref, self.old_sha1)
    process = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Bug fix: the child was never waited on — reap it to avoid a zombie
    # and to guarantee the backup ref exists before we proceed.
    process.communicate()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backup(self):\n\n\t\twith temp_dir(self.path):\n\t\t\t# only if changes made\n\t\t\tcheck = sp.check_output(['git', 'status', '--porcelain'])\n\t\t\t# check if untracked files\n\t\t\tuntracked = sp.check_output(['git', 'ls-files', '--others', '--exclude-standard'])\n\n\t\t\tif check:\n\t\t\t\tif untracked:\n\t...
[ "0.6952309", "0.6250587", "0.6083144", "0.6023908", "0.6018048", "0.5927529", "0.5912871", "0.58550334", "0.5830668", "0.5823843", "0.5808333", "0.5635343", "0.5619511", "0.5558526", "0.5550184", "0.54987204", "0.54584205", "0.54354334", "0.54245806", "0.53884566", "0.5356423...
0.7884595
0
Whether the audit failed (True) or passed (False).
def audit_failed(self):
    """Return the audit's failure flag: True if it failed, False if it passed."""
    return self.__failed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasFailed(self):\n record = self.getRunRecord().getRecord(\"run\")\n return record.state is FAIL", "def is_failed(self):\n\n return self._state == \"FAILED\"", "def is_failed(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n re...
[ "0.70987207", "0.7077051", "0.6967613", "0.6923564", "0.68601596", "0.6790287", "0.67796767", "0.6752796", "0.67493594", "0.67102164", "0.6676905", "0.667684", "0.6542292", "0.64718133", "0.6417293", "0.64025366", "0.63911164", "0.634746", "0.6297389", "0.62367797", "0.623135...
0.76317364
0
Audit the commit for proper end-of-line characters. The UNIX-style EOL is the only allowed EOL character.
def audit_eol(self):
    """Audit the change diff for non-UNIX end-of-line characters.

    Streams ``git diff -p`` output line by line, tracking the current
    commit and filename from diff headers, and logs a failure for any
    *added* line that ends in CR or CRLF — unless the file's mimetype or
    extension is on an allow list (e.g. vcards legitimately use CRLF).
    """
    # Regex's....
    re_commit = re.compile("^\xff(.+)\xff$")
    re_filename = re.compile("^diff --(cc |git a\/.+ b\/)(.+)$")
    blocked_eol = re.compile(r"(?:\r\n|\n\r|\r)$")

    # Bool to allow special files such as vcards to bypass the check
    eol_allowed = False

    # Do EOL audit!
    process = get_change_diff( self.repository, ["-p"] )
    for line in process.stdout:
        # Commit marker line: remember which commit we are inside.
        commit_change = re.match( re_commit, line )
        if commit_change:
            commit = commit_change.group(1)
            continue

        # Diff header line: new file starts; reset the per-file flags.
        file_change = re.match( re_filename, line )
        if file_change:
            filename = file_change.group(2)
            eol_violation = False
            eol_allowed = False

            # Check if it's an allowed mimetype
            # First - check with the mimetypes system, to see if it can tell
            guessed_type, _ = mimetypes.guess_type(filename)
            if guessed_type in self.ALLOWED_EOL_MIMETYPES:
                eol_allowed = True
                continue

            # Second check: by file extension
            # NOTE: This uses the FIRST dot as extension
            splitted_filename = filename.split(os.extsep)
            # Check if there's an extension or not
            # NOTE This assumes that files use dots for extensions only!
            if len(splitted_filename) > 1:
                extension = splitted_filename[1]
                if extension in self.ALLOWED_EOL_EXTENSIONS:
                    eol_allowed = True

            continue

        # Unless they added it, ignore it
        if not line.startswith("+"):
            continue

        # Only log the first violation per file (eol_violation guards it).
        if re.search( blocked_eol, line ) and not eol_violation:
            # Is this an allowed filename?
            if eol_allowed:
                continue

            # Failure has been found... handle it
            eol_violation = True
            self.__log_failure(commit, "End of Line Style (non-Unix): " + filename);
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eol(self):\n if self.current not in EOL:\n self.on_parser_error(\"EOL expected\")\n self.maybe_eol()", "def _output_commit_line(self): # noqa: C901, E501 pylint: disable=too-many-branches\n seen_this = False\n chars_written = 0\n for i in range(self.num_columns ...
[ "0.6151211", "0.6065239", "0.57468516", "0.5741316", "0.5723494", "0.5639385", "0.5574638", "0.5561204", "0.5554823", "0.55486727", "0.553186", "0.5530341", "0.55275774", "0.5481987", "0.54660696", "0.540383", "0.5398025", "0.5388231", "0.53565466", "0.53498983", "0.5348075",...
0.83223575
0
Audit the file names in the commit.
def audit_filename(self):
    """Check file names in every commit against the configured restrictions.

    Only files that were added, renamed, or copied are checked; a failure
    is logged for each restriction pattern a file name matches.
    """
    for commit in self.repository.commits.values():
        for filename in commit.files_changed:
            # Only newly introduced paths (Added / Renamed / Copied) matter.
            if commit.files_changed[filename]["change"] not in ("A", "R", "C"):
                continue
            for limit in self.filename_limits:
                if re.search(limit, filename):
                    self.__log_failure(commit.sha1, "Invalid filename: " + filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n ...
[ "0.67635244", "0.63775945", "0.6034091", "0.5988995", "0.58283305", "0.5755764", "0.5654512", "0.56192213", "0.5432775", "0.5430367", "0.52749014", "0.5257821", "0.52496487", "0.5248489", "0.5241954", "0.5239385", "0.5227689", "0.5223898", "0.5222077", "0.52184683", "0.521339...
0.7895987
0
Audit names in commit metadata. Names which do not have a first name and a surname are extremely uncommon and when present are therefore generally invalid. As we want people to use their actual name when committing we do some checks to make sure that what looks like an actual name is present.
def audit_names_in_metadata(self):
    """Check committer and author names look like real full names.

    Whitelisted names pass; the special name 'GitHub' is warned about but
    allowed for repositories on the PR whitelist; any remaining name
    without a space (i.e. no surname) is logged as a failure.
    """
    for commit in self.repository.commits.values():
        for name in (commit.committer_name, commit.author_name):
            # Explicitly whitelisted names are always fine.
            if name in self.FullNameWhitelist:
                continue
            # Web merges of pull requests are committed as 'GitHub'.
            if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:
                self.__log_warning(commit.sha1, "Commit has username 'GitHub' (web merge of PR); allowing anyway")
                continue
            # A name without a space is almost certainly not a full name.
            if " " not in name.strip():
                self.__log_failure(commit.sha1, "Non-full name: " + name)
                continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if commit.files_changed[ filename ][\"change\"] not in [\"A\",\"R\",\"C\"]:\n continue\n for restriction in self.filename_limits...
[ "0.62963563", "0.6226209", "0.6223605", "0.6148646", "0.6140524", "0.61402285", "0.6099476", "0.60691816", "0.6056856", "0.6056856", "0.6024149", "0.6016685", "0.5969354", "0.5957472", "0.5951923", "0.5932206", "0.5932206", "0.5906392", "0.59056324", "0.589487", "0.58702075",...
0.8450045
0
Audit commit metadata. Invalid hostnames such as localhost or (none) will be caught by this auditor. This will ensure that invalid email addresses or users will not show up in commits.
def audit_emails_in_metadata(self):
    """Audit committer/author email addresses in commit metadata.

    Rejects addresses that don't parse as ``local@domain``, addresses on
    an explicit domain blocklist (localhost and similar misconfigured
    hostnames), and addresses whose domain has neither an MX nor an A
    record in DNS.
    """
    # Iterate over commits....
    disallowed_domains = ["localhost", "localhost.localdomain", "(none)", "bombardier.com", "rail.bombardier.com"]
    for commit in self.repository.commits.values():
        for email_address in [ commit.committer_email, commit.author_email ]:
            # Extract the email address, and reject them if extraction fails....
            extraction = re.match("^(\S+)@(\S+)$", email_address)
            if not extraction:
                self.__log_failure(commit.sha1, "Seemingly invalid email address: " + email_address)
                continue

            # Don't allow domains which are disallowed...
            domain = extraction.group(2)
            if domain in disallowed_domains:
                self.__log_failure(commit.sha1, "Email address using a blocked domain: " + email_address)
                continue

            # Ensure they have a valid MX/A entry in DNS....
            # MX lookup first; on "no answer"-style failures fall back to an
            # A-record lookup before declaring the domain invalid.
            try:
                dns.resolver.query(domain, "MX")
            except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel):
                try:
                    dns.resolver.query(domain, "A")
                except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel, dns.resolver.NXDOMAIN):
                    self.__log_failure(commit.sha1, "Email address has an invalid domain : " + email_address)
            except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
                self.__log_failure(commit.sha1, "Email address has an invalid domain : " + email_address)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n ...
[ "0.6614517", "0.5485047", "0.5467171", "0.54115015", "0.5338054", "0.5191764", "0.5172768", "0.5139828", "0.5138755", "0.5125162", "0.50731736", "0.5002091", "0.49640554", "0.49508968", "0.49492618", "0.49048898", "0.49002182", "0.4897812", "0.4882828", "0.4845588", "0.482522...
0.7368635
0
Helper function to construct an address header for emails as Python stuffs it up
def address_header(self, name, email):
    """Build a correctly encoded ``Name <email>`` address header.

    The display name is RFC 2047-encoded via ``email.header.Header``
    because Python's default handling mangles non-ASCII names.
    """
    encoded_name = Header( name ).encode()
    return unicode("{0} <{1}>").format(encoded_name, email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode_rfc2822_address_header(header_text):\n def encode_addr(addr):\n name, email = addr\n # If s is a <text string>, then charset is a hint specifying the\n # character set of the characters in the string. The Unicode string\n # will be encoded using the following charsets in o...
[ "0.69923276", "0.67900985", "0.67680126", "0.6404634", "0.6254018", "0.62328947", "0.615882", "0.61046", "0.6023781", "0.6022702", "0.5986854", "0.5960331", "0.59242094", "0.5905854", "0.5892792", "0.58512026", "0.58358437", "0.5810467", "0.57352096", "0.5722051", "0.57130855...
0.7850054
0
Parse special keywords in commits to determine further postcommit actions.
def determine_keywords(self):
    """Parse special keywords in the commit message for post-commit actions.

    Three kinds of keywords are recognized, each via a regex table:
    - *split*: comma-separated values (CC mail addresses, FIXED-IN tags);
    - *numeric*: bug/feature numbers extracted as digit runs;
    - *presence*: boolean flags set merely by appearing (GUI, SILENT, notes).
    The collected results are stored on ``self.keywords``.
    """
    split = dict()
    split['email_cc'] = re.compile("^\s*CC[-_]?MAIL[:=]\s*(.*)")
    split['email_cc2'] = re.compile("^\s*C[Cc][:=]\s*(.*)")
    split['fixed_in'] = re.compile("^\s*FIXED[-_]?IN[:=]\s*(.*)")

    numeric = dict()
    numeric['bug_fixed'] = re.compile("^\s*(?:BUGS?|FEATURE)[:=]\s*(.+)")
    numeric['bug_cc'] = re.compile("^\s*CCBUGS?[:=]\s*(.+)")

    presence = dict()
    presence['email_gui'] = re.compile("^\s*GUI:")
    presence['silent'] = re.compile("(?:CVS|SVN|GIT|SCM).?SILENT")
    presence['notes'] = re.compile("(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')")

    results = defaultdict(list)
    for line in self.commit.message.split("\n"):
        # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off
        # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them
        line = re.sub("^Summary: (.+)", "\g<1>", line)

        # Start processing our keywords...
        # (Python 2 dict API: iteritems)
        for (name, regex) in split.iteritems():
            match = re.match( regex, line )
            if match:
                results[name] += [result.strip() for result in match.group(1).split(",")]

        for (name, regex) in numeric.iteritems():
            match = re.match( regex, line )
            if match:
                results[name] += re.findall("(\d{1,10})", match.group(1))

        for (name, regex) in presence.iteritems():
            if re.match( regex, line ):
                results[name] = True

    self.keywords = results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_commit_message(message):\n # ['closes', 'close', 'fix', ...]\n keywords = []\n [keywords.extend(val) for val in KEYWORDS.values()]\n # we need to sort to match longuest command possible\n keywords.sort(lambda x, y: cmp(len(y), len(x)))\n # 'closes|close|fix...'\n keywords_re = '|'.jo...
[ "0.599186", "0.520402", "0.5114426", "0.5038884", "0.5008906", "0.49850717", "0.49810693", "0.48139718", "0.47817907", "0.46998245", "0.4676912", "0.46327248", "0.46078002", "0.46047723", "0.4595017", "0.45620742", "0.45620742", "0.45582268", "0.45472842", "0.45239753", "0.45...
0.56862926
1