query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Make sure all required tags are set
def clean_tags(self): if hasattr(self.instance, 'get_required_keys') and hasattr(self.instance, 'tags'): for key in self.instance.get_required_keys(): if key not in self.cleaned_data.get('tags'): raise forms.ValidationError("Tag %s missing." % key) return self.cleaned_data.get('tags')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _verify_tags(self):\n for tag in self.tags:\n if tag.lower() in VASP_TAG_LIST:\n continue\n else:\n print((\"Warning: unknown INCAR tag '\" + tag + \"' with value '\" + str(self.tags[tag]) + \"'\"))", "def test_tags(question):\n assert \"tags\" in...
[ "0.6713339", "0.62802184", "0.62078756", "0.61916", "0.6153197", "0.6120965", "0.61105335", "0.6104445", "0.60799277", "0.6070347", "0.6061206", "0.5990083", "0.5961519", "0.5878047", "0.5865956", "0.5823758", "0.5815917", "0.577706", "0.57572424", "0.57379663", "0.568212", ...
0.6199197
3
function used for writing late checkin record in payslip input tree.
def get_inputs(self, contracts, date_from, date_to): res = super(PayslipLateCheckIn, self).get_inputs(contracts, date_to, date_from) late_check_in_type = self.env.ref('employee_late_check_in.late_check_in') contract = self.contract_id late_check_in_id = self.env['late.check_in'].search([('employee_id', '=', self.employee_id.id), ('date', '<=', self.date_to), ('date', '>=', self.date_from), ('state', '=', 'approved'), ]) amount = late_check_in_id.mapped('amount') cash_amount = sum(amount) if late_check_in_id: self.late_check_in_ids = late_check_in_id input_data = { 'name': late_check_in_type.name, 'code': late_check_in_type.code, 'amount': cash_amount, 'contract_id': contract.id, } res.append(input_data) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)", "def dump(self,out):\n if self.changed: raise StateError(_('Data changed: ')+ self.name)\n if not self.data: raise StateError(_('Data undefined: ')+self.name)\n out.write(struct.pack('4s3i',self.name,self.size,self...
[ "0.5688454", "0.54053", "0.52774614", "0.52668023", "0.52515334", "0.5238117", "0.5215203", "0.5178974", "0.50876176", "0.506635", "0.5011479", "0.4995191", "0.49629214", "0.49451622", "0.49426115", "0.4934632", "0.48918885", "0.48890415", "0.48604503", "0.48549697", "0.48458...
0.0
-1
function used for marking deducted Late checkin request.
def action_payslip_done(self): for recd in self.late_check_in_ids: recd.state = 'deducted' return super(PayslipLateCheckIn, self).action_payslip_done()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loan(self):", "def checkin(self):\n folio = self.folio_id\n if folio.payment_deposits <= 0:\n raise UserError(_(\"\"\"No record of security deposit found on folio {}\n \"\"\".format(folio.name)))\n if folio.state != 'on_queue':\n raise UserError(_(\n ...
[ "0.56058526", "0.557811", "0.5529027", "0.5488796", "0.52010155", "0.5192642", "0.516891", "0.51390547", "0.5094012", "0.50648564", "0.5062245", "0.5061174", "0.49786085", "0.49332282", "0.4931753", "0.49173915", "0.4892089", "0.4874537", "0.48676074", "0.4837274", "0.4802804...
0.5966537
0
Name of current protocol.
def name(self): return 'Null'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def protocol(self) -> str:\n return __name__", "def protocol(self) -> str:\n return pulumi.get(self, \"protocol\")", "def protocol_name(self):\n self._protocol_name = 'kerberos'\n return self._protocol_name", "def getProtocol(self) -> str:\n ...", "def layer_protocol_name...
[ "0.8527097", "0.8032896", "0.8025242", "0.7800471", "0.7785212", "0.7730119", "0.76614213", "0.76614213", "0.75701255", "0.75649047", "0.75599545", "0.7478808", "0.7466269", "0.7439446", "0.7334409", "0.7231231", "0.71754944", "0.715286", "0.70239735", "0.70239735", "0.702397...
0.0
-1
Header length of current protocol.
def length(self): raise UnsupportedCall(f"'{self.__class__.__name__}' object has no attribute 'length'")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def length(self):\n return struct.unpack('<H', self.pkt.payload[2:4])[0]", "def length(self):\n return struct.unpack('<H', self.pkt.payload[6:8])[0]", "def calculated_length(self) -> int:\n return TunnellingRequest.HEADER_LENGTH + len(self.raw_cemi)", "def header_len(self):\n if s...
[ "0.773718", "0.7697014", "0.74317473", "0.74186736", "0.72751355", "0.72751355", "0.7265553", "0.72438484", "0.7240133", "0.7159681", "0.7159037", "0.7113465", "0.69755954", "0.69566464", "0.69399494", "0.69354415", "0.68519056", "0.6833837", "0.68100524", "0.6744303", "0.672...
0.0
-1
Name of next layer protocol.
def protocol(self): raise UnsupportedCall(f"'{self.__class__.__name__}' object has no attribute 'protocol'")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def layer_protocol_name(self) -> str:\n return self._layer_protocol_name", "def protocol(self):\n return self._info.next # pylint: disable=E1101", "def protocol(self) -> str:\n return __name__", "def get_name(self):\n \n return 'Loop-Back'", "def name(self):\n ret...
[ "0.7427126", "0.7015629", "0.7010182", "0.66151756", "0.6591896", "0.6542744", "0.6537509", "0.6536096", "0.6428703", "0.62516314", "0.6208627", "0.6208627", "0.6167117", "0.6059282", "0.6015737", "0.59872663", "0.597741", "0.5912", "0.589437", "0.5888402", "0.58677226", "0...
0.53521055
91
Read (parse) packet data.
def read(self, length=None, **kwargs): # pylint: disable=unused-argument return dict()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_packet(self):\n\n\t\t#self.debug(\"READ BUFFER SIZE: %d\" % len(self.buff))\n\t\tbackup = self.buff[:]\n\t\tpacket = Packet()\n\t\ttry:\n\t\t\tpacket.direction = self.node\n\t\t\tpacket.ident = self.unpack('ubyte')\n\t\t\t\n\t\t\t#Defined structs from huge dict\n\t\t\tfor datatype, name in self.get_struct...
[ "0.73247546", "0.718705", "0.6907943", "0.6843252", "0.68329656", "0.6731588", "0.66780645", "0.6584551", "0.6576974", "0.6557971", "0.6532748", "0.64306426", "0.6358723", "0.6336314", "0.6299943", "0.62733495", "0.62719905", "0.6264791", "0.6221513", "0.62090373", "0.6196162...
0.0
-1
Make (construct) packet data.
def make(self, **kwargs): return bytes()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_packet(self, type, data): \n return (\"{}\\x00{}\\x00{}\".format(type, data, self.ID)).encode()", "def _make_data(cls, data: 'Data_ARP') -> 'dict[str, Any]': # type: ignore[override]\n return {\n 'htype': data.htype,\n 'ptype': data.ptype,\n 'hlen':...
[ "0.7320882", "0.6888568", "0.6545822", "0.650796", "0.6446084", "0.6386353", "0.6256809", "0.6228369", "0.6189242", "0.61338437", "0.6114817", "0.6103833", "0.60672927", "0.60629386", "0.60616815", "0.605075", "0.60235", "0.5961972", "0.594975", "0.59361804", "0.5934646", "...
0.53749585
81
Numeral registry index of the protocol.
def __index__(cls): raise UnsupportedCall(f'{cls.__name__!r} object cannot be interpreted as an integer')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def InterfaceIndex(self) -> int:", "def InterfaceIndex(self) -> int:", "def get_numkey(self):\n return self._numkey", "def getPidx(self):\n return int(bytes(self.keeper.getGbl(b\"pidx\")), 16)", "def idx(self):\n return int(self.__ph.get('idx', 0))", "def register_index(self) -> int:...
[ "0.66120684", "0.66120684", "0.6520881", "0.6501883", "0.6485339", "0.63360345", "0.6295744", "0.6161678", "0.6085867", "0.603896", "0.6011073", "0.6011073", "0.5929243", "0.5907422", "0.5906776", "0.58790284", "0.5871748", "0.58596975", "0.5855196", "0.5851725", "0.58458555"...
0.0
-1
Decode next layer protocol.
def _decode_next_layer(self, *args, **kwargs): # pylint: disable=signature-differs raise UnsupportedCall(f"'{self.__class__.__name__}' object has no attribute '_decode_next_layer'")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _decode_next_layer(self, dict_, length=None):\n # make next layer protocol name\n proto = str(self._prot or 'Raw').lower()\n\n # make BytesIO from frame package data\n bytes_ = io.BytesIO(self._file.read(dict_['len']))\n info, protochain = self._import_next_layer(bytes_, leng...
[ "0.75996035", "0.6549479", "0.65207684", "0.6393973", "0.6335503", "0.6324604", "0.6315522", "0.62680393", "0.62302494", "0.6203099", "0.61799216", "0.6160706", "0.6160706", "0.6118852", "0.6044695", "0.6001132", "0.5993506", "0.5988984", "0.59845906", "0.59568155", "0.590623...
0.67941993
1
Import next layer extractor.
def _import_next_layer(self, *args, **kwargs): # pylint: disable=signature-differs raise UnsupportedCall(f"'{self.__class__.__name__}' object has no attribute '_import_next_layer'")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _import_next_layer(self, file_, length):\n if self._prot == 'Ethernet':\n from .link import Ethernet as Protocol\n elif self._prot == 'IPv4':\n from .internet import IPv4 as Protocol\n elif self._prot == 'IPv6':\n from .internet import IPv6 as Protocol\n ...
[ "0.6111195", "0.54837346", "0.54835594", "0.5450349", "0.5450116", "0.54399455", "0.5409996", "0.5303996", "0.52164537", "0.52136886", "0.516096", "0.51069194", "0.5101633", "0.5099417", "0.5078807", "0.5042868", "0.4991108", "0.49878004", "0.4973067", "0.49313796", "0.491468...
0.66877997
0
Class to organize and execute QA for a DESI production
def __init__(self, specprod_dir=None, **kwargs): if specprod_dir is None: specprod_dir = specprod_root() self.specprod_dir = specprod_dir # Init QA_MultiExp.__init__(self, specprod_dir=specprod_dir, **kwargs) # Load up exposures for the full production nights = get_nights(specprod_dir=self.specprod_dir) for night in nights: self.mexp_dict[night] = {} for exposure in get_exposures(night, specprod_dir = self.specprod_dir): # Object only?? frames_dict = get_files(filetype = str('frame'), night = night, expid = exposure, specprod_dir = self.specprod_dir) self.mexp_dict[night][exposure] = frames_dict # Output file names self.qaexp_outroot = self.qaprod_dir+'/'+self.prod_name+'_qa' # Nights list self.qa_nights = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qa():\n env.config_file = 'config_production.py'\n env.hosts = ['ombu@d2.ombuweb.com:34165']\n env.host_type = 'qa'\n env.user = 'ombu'\n env.host_webserver_user = 'www-data'\n env.host_site_path = '/mnt/main/qa/qa2/public'", "def main():\n\n # Run all the requirements for part A\n ##...
[ "0.64223295", "0.6202825", "0.5899661", "0.57702637", "0.56131524", "0.5524281", "0.5509671", "0.5425418", "0.53912246", "0.53891236", "0.53559035", "0.53552413", "0.53085", "0.5271458", "0.5259535", "0.5251413", "0.524817", "0.5218205", "0.52015793", "0.51848274", "0.5179872...
0.5289428
13
Load QA data from night objects on disk
def load_data(self, inroot=None): self.data = {} # Load for night in self.mexp_dict.keys(): qaNight = QA_Night(night, specprod_dir=self.specprod_dir, qaprod_dir=self.qaprod_dir) qaNight.load_data() # self.data[night] = qaNight.data[night]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self):", "def load_data(self) -> None:", "def load_rentedout():", "def _load_test_data(self):\n self._save_test_data()", "def load_mp_data():\n tree = Artifact.load(os.path.join(PREFIX_DIR, \"rooted-tree.qza\"))\n table = Artifact.load(os.path.join(PREFIX_DIR, \"table.qza\"))\n ...
[ "0.65798295", "0.6398732", "0.6165075", "0.61242807", "0.5998567", "0.58897173", "0.58770794", "0.58206344", "0.57725376", "0.5756605", "0.5740402", "0.5737037", "0.5737037", "0.57252693", "0.5663166", "0.5662534", "0.56499773", "0.56499773", "0.56499773", "0.56499773", "0.56...
0.6338221
2
Build QA data dict from the nights
def build_data(self): from desiutil.io import combine_dicts # Loop on exposures odict = {} for qanight in self.qa_nights: for qaexp in qanight.qa_exps: # Get the exposure dict idict = write_qa_exposure('foo', qaexp, ret_dict=True) odict = combine_dicts(odict, idict) # Finish self.data = odict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self, inroot=None):\n self.data = {}\n # Load\n for night in self.mexp_dict.keys():\n qaNight = QA_Night(night, specprod_dir=self.specprod_dir, qaprod_dir=self.qaprod_dir)\n qaNight.load_data()\n #\n self.data[night] = qaNight.data[nigh...
[ "0.5826197", "0.55371296", "0.5417445", "0.5337191", "0.52416694", "0.51694846", "0.5151711", "0.5091198", "0.5085503", "0.5074048", "0.5069187", "0.5063415", "0.50576264", "0.5021445", "0.50159806", "0.5006675", "0.4991931", "0.49876162", "0.49839446", "0.49812287", "0.49771...
0.6585092
0
Slurp all the individual QA files, night by night Loops on nights, generating QANight objects along the way
def slurp_nights(self, make_frameqa=False, remove=True, restrict_nights=None, write_nights=False, **kwargs): log = get_logger() # Remake? if make_frameqa: self.make_frameqa(**kwargs) # Reset log.info("Resetting QA_Night objects") self.qa_nights = [] # Loop on nights for night in self.mexp_dict.keys(): if restrict_nights is not None: if night not in restrict_nights: continue qaNight = QA_Night(night, specprod_dir=self.specprod_dir, qaprod_dir=self.qaprod_dir) qaNight.slurp(remove=remove) # Save nights self.qa_nights.append(qaNight) # Write? if write_nights: qaNight.write_qa_exposures()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_quasar(folder, set_type, doc_size):\n print(\"def process_quasar(folder, set_type, doc_size) ...\")\n\n # create counter for enumeration of batch-files\n counter = 0\n\n # Question File and Path\n question_file = set_type + \"_questions.json\"\n question_file_path = Path(\"/\".join([f...
[ "0.609987", "0.59429026", "0.5871865", "0.55248433", "0.54379", "0.5391129", "0.5366999", "0.53585476", "0.53522", "0.53338474", "0.5293552", "0.5277471", "0.5270402", "0.5206287", "0.52051455", "0.51889557", "0.5166118", "0.51528084", "0.51408404", "0.51355785", "0.5130651",...
0.5576337
3
Test case for add_or_update_case
def test_add_or_update_case(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_case(self):\n pass", "def test_update_one(self):\n pass", "def test_update_record(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update_scenario(self):\n pass",...
[ "0.79237324", "0.7314126", "0.7272837", "0.72685695", "0.72685695", "0.72685695", "0.712033", "0.69949913", "0.6982817", "0.69368035", "0.69113", "0.68479264", "0.68319446", "0.6824832", "0.6721132", "0.6690393", "0.6686279", "0.66511667", "0.6622248", "0.6599744", "0.6579878...
0.90939504
0
Test case for delete_case
def test_delete_case(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_run(self):\n pass", "def test_delete(self):\n pass", "def test_delete1(self):\n pass", "def test_delete_record(self):\n pass", "def test_CovidCase_delete(self):\n # setting up by creating and saving the the database\n del_Covid = self.create_CovidCa...
[ "0.85045195", "0.83174175", "0.8187092", "0.81214935", "0.7901589", "0.78933346", "0.7868206", "0.7762365", "0.7726762", "0.77229226", "0.7537628", "0.74579", "0.73829216", "0.735922", "0.7357545", "0.7353939", "0.73434424", "0.7334559", "0.7333317", "0.7319243", "0.72404516"...
0.94501704
0
Test case for get_case_by_id
def test_get_case_by_id(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_case(loqusdbapi, mocker):\n # GIVEN a loqusdb api\n case_id = 'a_case'\n # WHEN fetching a case with the adapter\n mocker.patch.object(subprocess, 'check_output')\n loqusdb_output = (b\"{'_id': 'one_case', 'case_id': 'one_case'}\\n\"\n b\"{'_id': 'a_case', 'case_id'...
[ "0.7206168", "0.7135359", "0.70535195", "0.6983086", "0.69185466", "0.6779661", "0.6560765", "0.6444879", "0.6441801", "0.643984", "0.64307916", "0.6415146", "0.63614887", "0.63151497", "0.6309817", "0.6308288", "0.6291289", "0.62707704", "0.62072265", "0.62045175", "0.616458...
0.949092
0
Test case for get_cases_for_dict
def test_get_cases_for_dict(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def navigate_case_dictionary(case_list_for_run, num_cases):", "def test_create_results_dict_1(self):\n dict = find_domains.create_results_dict(self.rps_results)\n with self.subTest():\n self.assertEqual(len(dict.keys()), 4)\n with self.subTest():\n self.assertEqual(len(...
[ "0.70761895", "0.65705884", "0.647785", "0.6304288", "0.6191103", "0.61260873", "0.6123775", "0.60676205", "0.60428697", "0.60196096", "0.59500134", "0.58859193", "0.5870065", "0.5868873", "0.5839127", "0.5819128", "0.57976145", "0.5794307", "0.57906353", "0.57838327", "0.576...
0.93498963
0
Test case for get_sync_history
def test_get_sync_history(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_tracker_getHistory():\n\n trackers, cap = init_tracker()\n tr = trackers[0]\n tr.addHistory([1, 1, 1, 1])\n\n assert tr.getHistory()[1] == [1, 1, 1, 1]", "def test_get_team_history(self):\n pass", "def QueryHistory(self):\n return []", "def testGetHistory(self):\n self.maxDi...
[ "0.692454", "0.68217295", "0.6417337", "0.63609856", "0.6296297", "0.6270521", "0.6258328", "0.6244751", "0.62230396", "0.6212335", "0.6196629", "0.60348105", "0.602882", "0.5997421", "0.5959013", "0.5924428", "0.5920476", "0.59072894", "0.58979166", "0.58102685", "0.57995194...
0.9430264
0
Test case for update_case
def test_update_case(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_scenario(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_add_or_update_case(self):\n pass", "def test_update_record(self):\n pass", "def test_update_one(self):\n ...
[ "0.8599205", "0.849513", "0.849513", "0.849513", "0.8350617", "0.8206942", "0.81498384", "0.81489325", "0.78003776", "0.75835353", "0.75727797", "0.7463717", "0.7440917", "0.7428357", "0.7388373", "0.7385997", "0.73503935", "0.7333985", "0.73062307", "0.7268944", "0.72565717"...
0.93751144
0
Set state of key exchange process
def _set_state(self, state): #print("** set state from %d to %d" % (self.state, state)) self.state = state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setstate__(self, state):\n state['_lock'] = Lock()\n self.__dict__.update(state)", "def __setstate__(self, state):\n\n self.set(DER = state)", "def set_state(self, state: int):", "def set_state( self ):", "def set_state(self, state: ProcessStateStr | core.QProcess.ProcessState):\...
[ "0.6264874", "0.62306124", "0.6219067", "0.62150574", "0.6114401", "0.609517", "0.6037395", "0.6025829", "0.5982595", "0.59734386", "0.5955964", "0.59348714", "0.59043247", "0.5876987", "0.5874065", "0.5861183", "0.58430445", "0.5808853", "0.5787678", "0.5764213", "0.57516503...
0.59008133
13
Set key to the encryptor and decryptor
def set_cipher(self, key_name, hint): message_key_types.set_cipher(self.shared_key, self.nonce, key_name, hint)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def set_encryption(key):\n global_scope['enc'] = Encryption(key.encode())", "def get_key(self, key_value):\n # Storing the correct key value back to the self.key attributes.\n self.k...
[ "0.75822407", "0.7209422", "0.7106368", "0.66078645", "0.6590576", "0.6399073", "0.6381233", "0.63626736", "0.63152766", "0.62950706", "0.6283543", "0.62832546", "0.62813187", "0.6264032", "0.623297", "0.61464673", "0.610903", "0.61076546", "0.60681397", "0.6064058", "0.60416...
0.612742
16
Unset key from the encryptor and decryptor
def unset_cipher(self, key_name=None): if key_name is None: if self.key_name is not None: message_key_types.unset_cipher(self.key_name) if self.pending_key_name is not None: message_key_types.unset_cipher(self.pending_key_name) else: message_key_types.unset_cipher(key_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def del_key(self):\n # Deleting the values from the self.key and self.cryptor attributes.\n self.key=None\n self.cryptor=None", "def clear_key(self, key):\r\n return self.handler.clear_key(key_to_code(key))", "def tearDown(self) -> None:\n\n del self.private_key\n del self...
[ "0.79658395", "0.6650825", "0.6479646", "0.64384425", "0.62364286", "0.6223678", "0.62217623", "0.61869067", "0.61497605", "0.6120932", "0.6093871", "0.60819805", "0.60426354", "0.6033569", "0.6002724", "0.5995064", "0.5992837", "0.5932019", "0.59261566", "0.5922496", "0.5895...
0.6914403
1
Set timer for key refreshment
def set_invoke_timer(self, timeout, retry_entry=False): if self.timer_entry is not None and self.timer_entry.active: self.timer_entry.deactivate() #print("(%d) set_invoke_timer:" % int(time.time()), timeout) self.timer_entry = query_management.QueryEntry(expire_after=timeout, callback_expire=self._perform_key_exchange, retry_count=0) if retry_entry: self.timer_entry.data[KeyType.retry_timer] = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_time(cls, key):\n key.put()", "def __updateElapsedTime(self):\n if self._keyCodeTime != 0.0 and \\\n (globalClock.getFrameTime() - self._keyCodeTime) >= self._timeout:\n self.notify.debug(\"Key code timed out. Resetting...\")\n self.reset()\n ...
[ "0.7022178", "0.66496783", "0.64718276", "0.64211446", "0.6270911", "0.62581867", "0.62419593", "0.62306786", "0.61925316", "0.6182545", "0.61779076", "0.6164358", "0.6159859", "0.6124073", "0.6121016", "0.6054378", "0.6042401", "0.6031246", "0.60209733", "0.6018492", "0.6016...
0.58821076
29
Set timer for key revocation
def _set_delete_timer(self, key_name, timeout): if key_name is not None: #print("(%d) _set_delete_timer:" % int(time.time()), key_name.hex()[:10], timeout) query_management.QueryEntry(expire_after=timeout, callback_expire=remove_old_key, data={KeyType.hint: key_name}, retry_count=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __updateElapsedTime(self):\n if self._keyCodeTime != 0.0 and \\\n (globalClock.getFrameTime() - self._keyCodeTime) >= self._timeout:\n self.notify.debug(\"Key code timed out. Resetting...\")\n self.reset()\n messenger.send(KeyCodes.CLEAR_CODE_E...
[ "0.6193708", "0.6188828", "0.61632437", "0.61356395", "0.6049411", "0.5949552", "0.5939341", "0.58892876", "0.5877896", "0.584273", "0.58422077", "0.5806897", "0.57887334", "0.57828623", "0.5779299", "0.5777707", "0.571499", "0.5553746", "0.55466074", "0.5524556", "0.5522834"...
0.6783776
0
Perform ECDH key exhange to establish secure channel to the node
def _perform_key_exchange(self, query_entry): if KeyType.retry_timer in query_entry.data and query_entry.data[KeyType.retry_timer]: message_key_types.unset_cipher(self.pending_key_name) self.pending_key_name = None self._set_state(KeyExchangeManager.STATE_REQUESTING) #print("# (%d) _perform_key_exchange: to" % int(time.time()), self.counter_node_id.hex()) self.secret_key, self.peer_public_key, self.pending_key_name = message_key_types.get_ECDH_parameters() self.nonce = os.urandom(16) self.random = os.urandom(8) ret = self.networking.send_key_exchange_message(self.domain_id, self.counter_node_id, "request", self.peer_public_key, self.nonce, self.random, self.pending_key_name) if not ret: self._set_state(KeyExchangeManager.STATE_NONE) message_key_types.unset_cipher(self.pending_key_name) message_key_types.unset_cipher(self.key_name) self.secret_key = None self.peer_public_key = None self.pending_key_name = None self.nonce = None self.random = None return rand_time = KeyExchangeManager.KEY_EXCHANGE_RETRY_INTERVAL*random.uniform(0.5, 1.5) self.set_invoke_timer(rand_time, retry_entry=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exchange_key(connection, pub_key):\r\n\r\n if main.diffe_key_exchange is False:\r\n # Get the server's public key\r\n server_pub_key_bytes = connection.recv(1024)\r\n\r\n # Send public key\r\n connection.sendall(rsa.PublicKey.save_pkcs1(pub_key))\r\n\r\n else:\r\n # Rou...
[ "0.6512742", "0.6338184", "0.63009053", "0.62812775", "0.62282526", "0.613101", "0.6050611", "0.60426515", "0.6037874", "0.5955243", "0.5955243", "0.5944734", "0.5941283", "0.58977014", "0.5856448", "0.5838825", "0.5827069", "0.5824423", "0.5812279", "0.5808559", "0.57647914"...
0.0
-1
Procedure when receiving message with BBcNetwork.REQUEST_KEY_EXCHANGE
def receive_exchange_request(self, pubkey, nonce, random_val, hint): if self.state != KeyExchangeManager.STATE_REQUESTING: #print("(%d) receive_exchange_request: processing" % int(time.time())) self.peer_public_key = pubkey self.nonce = nonce self.random = random_val self.secret_key, self.peer_public_key, self.pending_key_name = message_key_types.get_ECDH_parameters() self.shared_key = message_key_types.derive_shared_key(self.secret_key, pubkey, random_val) self._set_state(KeyExchangeManager.STATE_CONFIRMING) self.networking.send_key_exchange_message(self.domain_id, self.counter_node_id, "response", self.peer_public_key, self.nonce, self.random, self.pending_key_name) self.set_cipher(self.pending_key_name, hint) else: #print("(%d) receive_exchange_request: ignoring" % int(time.time())) message_key_types.unset_cipher(self.pending_key_name) self.pending_key_name = None if self.key_name is None: self._set_state(KeyExchangeManager.STATE_NONE) else: self._set_state(KeyExchangeManager.STATE_ESTABLISHED) rand_time = KeyExchangeManager.KEY_EXCHANGE_RETRY_INTERVAL * random.uniform(0.5, 1.5) if self.timer_entry is not None and self.timer_entry.active: self.timer_entry.update_expiration_time(rand_time) self.timer_entry.data[KeyType.retry_timer] = True else: self.set_invoke_timer(rand_time, retry_entry=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key_request(self, user):\n\t\tclient_log.debug(f'Запрос публичного ключа для {user}')\n\t\treq = {\n\t\t\tACTION: PUBLIC_KEY_REQUEST,\n\t\t\tTIME: time.time(),\n\t\t\tACCOUNT_NAME: user\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tif RESPO...
[ "0.67223096", "0.6613165", "0.66030747", "0.65658045", "0.6505863", "0.64924264", "0.63855624", "0.63809603", "0.6345278", "0.6331297", "0.6206609", "0.6176792", "0.6141044", "0.61168796", "0.61084545", "0.5941246", "0.5910018", "0.5898104", "0.58852327", "0.5862589", "0.5856...
0.6525278
4
Process ECDH procedure (receiving response)
def receive_exchange_response(self, pubkey, random_val, hint): #print("(%d) receive_exchange_response:" % int(time.time())) #print(" **> state:", self.state) if self.state != KeyExchangeManager.STATE_REQUESTING: return rand_time = int(KeyExchangeManager.KEY_REFRESH_INTERVAL*random.uniform(0.9, 1.1)) self.set_invoke_timer(rand_time) self.shared_key = message_key_types.derive_shared_key(self.secret_key, pubkey, random_val) self._set_delete_timer(self.key_name, KeyExchangeManager.KEY_OBSOLETE_TIMER) self.networking.send_key_exchange_message(self.domain_id, self.counter_node_id, "confirm", self.peer_public_key, self.nonce, self.random, self.pending_key_name) self.key_name = self.pending_key_name self.set_cipher(self.key_name, hint) self._set_state(KeyExchangeManager.STATE_ESTABLISHED) #print("*STATE_ESTABLISHED")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def response(self):\n return self._send(bytes([0xef,0xfe,0x02,0x0,0x0,0x0,0x0,0x0]))", "def process_eas (chan, eas):\n\n text = os.popen('./dsame.py --msg \"' + eas + '\"').read().split(\"\\n\")\n text2 = list(filter(None, text))\n n = len(text2)\n if n:\n print (\"Transmitting...\");\n ...
[ "0.6189765", "0.59589136", "0.5955229", "0.5889146", "0.57184684", "0.5701011", "0.5673351", "0.5545665", "0.55449224", "0.554279", "0.55046767", "0.5504035", "0.55030364", "0.5494673", "0.54934955", "0.5493104", "0.54797333", "0.5475469", "0.5475273", "0.5459943", "0.5458891...
0.0
-1
Confirm that the key has been agreed
def receive_confirmation(self): #print("(%d) receive_confirmation:" % int(time.time())) #print(" **> state:", self.state) if self.state != KeyExchangeManager.STATE_CONFIRMING: return rand_time = int(KeyExchangeManager.KEY_REFRESH_INTERVAL*random.uniform(0.9, 1.1)) self.set_invoke_timer(rand_time) self._set_delete_timer(self.key_name, KeyExchangeManager.KEY_OBSOLETE_TIMER) self.key_name = self.pending_key_name self._set_state(KeyExchangeManager.STATE_ESTABLISHED) #print("*STATE_ESTABLISHED")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def confirm(self, msg, *args):\n if Controller.prev_regex is None:\n await msg.channel.send(**{\n 'content': 'No key change in progress',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })\n return\n ...
[ "0.70200527", "0.6937028", "0.6796732", "0.65929675", "0.64999527", "0.64217055", "0.6304895", "0.626149", "0.6239152", "0.6135", "0.61015856", "0.6096368", "0.59683174", "0.5962593", "0.5954259", "0.59480214", "0.59259063", "0.58750486", "0.5863333", "0.58017457", "0.5787794...
0.63993216
6
Euclidean distance between two graph poses
def distance(pose1, pose2): return ( (pose1["pose"][3] - pose2["pose"][3]) ** 2 + (pose1["pose"][7] - pose2["pose"][7]) ** 2 + (pose1["pose"][11] - pose2["pose"][11]) ** 2 ) ** 0.5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def graph_dist(i1: int, g1: nx.Graph, i2: int, g2: nx.Graph) -> t.Tuple[int, int, float]:\n space1, space2 = map(dict, map(mut_space, [g1, g2]))\n d = 0\n for k in set(list(space1) + list(space2)):\n if k in space1 and k in space2:\n d += len(set(space1[k]).symmetric_difference(set(space...
[ "0.7292896", "0.7110288", "0.6867631", "0.68615735", "0.6761212", "0.6698261", "0.66788447", "0.6668662", "0.6659703", "0.66497093", "0.6615427", "0.66116714", "0.66099334", "0.6598748", "0.65919584", "0.65918255", "0.65908754", "0.6572982", "0.6572364", "0.6497199", "0.64961...
0.0
-1
Build a graph from a connectivity json file
def open_graph(scan_id):
    """Build a graph from a connectivity json file.

    Nodes are panorama image ids; an undirected edge joins every mutually
    unobstructed pair of included viewpoints, weighted by pose distance.
    """
    path = "%s%s_connectivity.json" % (connectivity_dir, scan_id)
    graph = nx.Graph()
    with open(path) as handle:
        nodes = json.load(handle)
    for i, node in enumerate(nodes):
        if not node["included"]:
            continue
        for j, reachable in enumerate(node["unobstructed"]):
            if reachable and nodes[j]["included"]:
                assert nodes[j]["unobstructed"][i], "Graph should be undirected"
                graph.add_edge(
                    node["image_id"],
                    nodes[j]["image_id"],
                    weight=distance(node, nodes[j]),
                )
    return graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_graph(connectDir, scan_id):\n infile = \"%s%s_connectivity.json\" % (connectDir, scan_id)\n G = nx.Graph()\n with open(infile) as f:\n data = json.load(f)\n for i, item in enumerate(data):\n if item[\"included\"]:\n for j, conn in enumerate(item[\"unobstruc...
[ "0.7250749", "0.7151497", "0.69580156", "0.6932821", "0.6580459", "0.6554932", "0.6549572", "0.6462024", "0.6431498", "0.6411141", "0.6403866", "0.6394087", "0.63446313", "0.63325745", "0.63108885", "0.6290227", "0.6284171", "0.62591064", "0.6194136", "0.6182282", "0.6135531"...
0.7102555
2
.mfk play Begin the game ||| [+/] Edit the mfk user list ||| marry fuck kill Give the verdict ||| score Retrieves the score of a user
def mfk(inp, db=None):
    """IRC-style "marry/fuck/kill" game command dispatcher.

    `.mfk play` begins the game; `+nick` / `-nick` edit the user list;
    "marry/fuck/kill <nick>" records a verdict; `score <nick>` retrieves a
    user's score. Relies on helpers `db_init`, `play`, `update` and `score`
    defined elsewhere in this module.
    """
    db_init(db)
    inp = inp.lower()
    # Pre-build verdict matchers; each captures the nick after the keyword.
    m_regex = re.compile(r"marry\s([a-zA-Z0-9_]+\s|[a-zA-Z0-9_]+)").finditer(inp)
    f_regex = re.compile(r"fuck\s([a-zA-Z0-9_]+\s|[a-zA-Z0-9_]+)").finditer(inp)
    k_regex = re.compile(r"kill\s([a-zA-Z0-9_]+|[a-zA-Z0-9_]+\s)").finditer(inp)
    if inp.startswith('play'):
        #play game
        return play(db)
    if inp.startswith('+'):
        #add member
        nick = inp[2:]
        exists = db.execute("select user from mfk where user=?" ,(nick,)).fetchall()
        if exists:
            return "User has already been added."
        db.execute("insert into mfk values(?, ?, ?, ?)",(nick, 0, 0, 0))
        db.commit()
        return "Added."
    if inp.startswith('-'):
        #remove member
        nick = inp[2:]
        exists = db.execute("select user from mfk where user=?" ,(nick,)).fetchall()
        if not exists:
            return "User doesn't exist."
        db.execute("delete from mfk where user=?",(nick,))
        db.commit()
        return "Removed."
    updated = False
    # Apply every verdict found in the message; slice offsets skip the
    # keyword plus the following space ("marry " = 6, "fuck "/"kill " = 5).
    for nick in m_regex:
        nick = nick.group()
        nick = nick[6:].strip(' ')
        update('marry',nick,db)
        updated = True
    for nick in f_regex:
        nick = nick.group()
        nick = nick[5:].strip(' ')
        update('fuck',nick,db)
        updated = True
    for nick in k_regex:
        nick = nick.group()
        nick = nick[5:].strip(' ')
        update('kill',nick,db)
        updated = True
    if inp.startswith('score'):
        #get score
        nick = inp[6:]
        return score(nick,db)
    if updated:
        return "That makes sense."
    return "Unexpected input."
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enter_game_scores():\n pass", "def update_score():\n pass", "async def leaderboard(self, ctx):\r\n rank = 1\r\n strlist = []\r\n for k, v in sorted(player.items(), key=lambda x: expose(x[1]), reverse=True): # operator.itemgetter(1)\r\n position = str(rank) + '. ' ...
[ "0.67305034", "0.63729185", "0.6132123", "0.6093871", "0.60754216", "0.5949699", "0.59380084", "0.59304893", "0.59282887", "0.5903168", "0.58756876", "0.58682275", "0.5865948", "0.58637714", "0.5862318", "0.5842622", "0.58407027", "0.582625", "0.5826105", "0.5816378", "0.5811...
0.5794052
25
Returns the presence for this channel
def presence(self, params=None, timeout=None):
    """Returns the presence for this channel."""
    query = params if params else {}
    path = '/channels/%s/presence' % self.__name
    response = self.__ably._get(path, params=query, timeout=timeout)
    return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def presence(self):\n return self.slack_client.api_call(\"users.getPresence?user=\"+self.user_id)", "def isHumanPresence(self):\n\t\treturn self.humanPresence", "def online(self):\n api_call = self.presence()\n if api_call.get('ok'):\n # retrieve all users so we can find our bot...
[ "0.78225", "0.65050405", "0.63529664", "0.6173057", "0.5725983", "0.5710549", "0.5505179", "0.54912657", "0.54869306", "0.54740065", "0.5472166", "0.5465967", "0.5449083", "0.5377885", "0.5373487", "0.5356357", "0.534464", "0.53218323", "0.52983725", "0.52873003", "0.528721",...
0.80038244
0
Returns the history for this channel
def history(self, direction=None, limit=None, start=None, end=None, timeout=None):
    """Returns the history for this channel.

    Optional direction/limit/start/end become query-string parameters;
    the result is paginated and decrypted when the channel has a cipher.
    """
    query = {}
    if direction:
        query['direction'] = '%s' % direction
    if limit:
        query['limit'] = '%d' % limit
    if start:
        query['start'] = self._format_time_param(start)
    if end:
        query['end'] = self._format_time_param(end)

    path = '/channels/%s/history' % self.__name
    if query:
        path += '?' + urlencode(query)

    # Encrypted channels need a handler that can decrypt each message.
    handler = (make_encrypted_message_response_handler(self.__cipher)
               if self.__cipher else message_response_handler)

    return PaginatedResult.paginated_query(
        self.ably.http, path, None, handler
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_history(self):\n return self.history", "def history(self):\n return self.board.history", "def history(self):\n return self.info['history']", "def get_history(self):\r\n\r\n return self.board_history", "def history(self):\n return self._history", "def history(self):\...
[ "0.8223158", "0.8190711", "0.8100468", "0.8018653", "0.7970339", "0.7970339", "0.7960411", "0.763217", "0.7524663", "0.74288756", "0.7316858", "0.7306517", "0.7249383", "0.72073257", "0.7187233", "0.7177014", "0.71616286", "0.71466166", "0.713637", "0.7119178", "0.7116252", ...
0.7255194
12
Publishes a message on this channel.
def publish(self, name, data, timeout=None):
    """Publishes a message on this channel."""
    message = Message(name, data)
    if self.encrypted:
        message.encrypt(self.__cipher)

    # Serialize according to the configured wire protocol.
    use_text = self.ably.options.use_text_protocol
    body = message.as_json() if use_text else message.as_thrift()

    path = '/channels/%s/publish' % self.__name
    headers = HttpUtils.default_post_headers(not use_text)
    response = self.ably.http.post(
        path,
        headers=headers,
        body=body,
        timeout=timeout
    )
    return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, message):\n logger.info(\"Publishing to topic [{0}]: {1}\".format(self._topic_name, message))\n self._executor.send(json.dumps({\n 'op': 'publish',\n 'id': 'publish:{0}:{1}'.format(self._topic_name, self._id),\n 'topic': self._topic_name,\n ...
[ "0.8069301", "0.7968601", "0.7785665", "0.76731753", "0.7639972", "0.761992", "0.75402987", "0.7448884", "0.74153394", "0.7411613", "0.7408458", "0.7408458", "0.73652995", "0.73333603", "0.7326912", "0.72585905", "0.7211472", "0.72038764", "0.7194173", "0.7134606", "0.7127167...
0.6742099
38
The set of arguments for constructing a Assessment resource.
def __init__(__self__, *,
             resource_details: pulumi.Input[Union['AzureResourceDetailsArgs', 'OnPremiseResourceDetailsArgs', 'OnPremiseSqlResourceDetailsArgs']],
             resource_id: pulumi.Input[str],
             status: pulumi.Input['AssessmentStatusArgs'],
             additional_data: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             assessment_name: Optional[pulumi.Input[str]] = None,
             metadata: Optional[pulumi.Input['SecurityAssessmentMetadataPropertiesArgs']] = None,
             partners_data: Optional[pulumi.Input['SecurityAssessmentPartnerDataArgs']] = None):
    """The set of arguments for constructing a Assessment resource.

    :param resource_details: Details of the resource that was assessed.
    :param resource_id: The identifier of the resource.
    :param status: The result of the assessment.
    :param additional_data: Additional data regarding the assessment.
    :param assessment_name: Unique key for the assessment type.
    :param metadata: Properties of the assessment metadata.
    :param partners_data: Data regarding 3rd party partner integration.
    """
    # Required properties are always set.
    pulumi.set(__self__, "resource_details", resource_details)
    pulumi.set(__self__, "resource_id", resource_id)
    pulumi.set(__self__, "status", status)
    # Optional properties are only set when a value was supplied.
    optional_props = {
        "additional_data": additional_data,
        "assessment_name": assessment_name,
        "metadata": metadata,
        "partners_data": partners_data,
    }
    for prop, value in optional_props.items():
        if value is not None:
            pulumi.set(__self__, prop, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(__self__,\n resource_name: str,\n args: AssessmentArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: AssessmentPolicyArgs,\n opts:...
[ "0.71892506", "0.676996", "0.6377259", "0.6322052", "0.63183135", "0.59458417", "0.5945797", "0.57835245", "0.57723206", "0.5743007", "0.5741708", "0.5738933", "0.57031816", "0.5700542", "0.5700223", "0.56762666", "0.5639396", "0.56337005", "0.56271166", "0.56271166", "0.5627...
0.6006293
5
Details of the resource that was assessed
def resource_details(self) -> pulumi.Input[Union['AzureResourceDetailsArgs', 'OnPremiseResourceDetailsArgs', 'OnPremiseSqlResourceDetailsArgs']]:
    """Details of the resource that was assessed."""
    return pulumi.get(self, "resource_details")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resource_details(self) -> pulumi.Output[Any]:\n return pulumi.get(self, \"resource_details\")", "def resource_details(self) -> pulumi.Output[Any]:\n return pulumi.get(self, \"resource_details\")", "def get_resource_details (self):\n return (f\"[Title:\\\"{self.get_title()}\\\"] [Author...
[ "0.7746103", "0.7746103", "0.76440537", "0.7001793", "0.6875115", "0.6875115", "0.6875115", "0.6875115", "0.6875115", "0.6875115", "0.6875115", "0.686545", "0.6756598", "0.6750164", "0.66482615", "0.6627125", "0.6553682", "0.6521818", "0.64652663", "0.6450048", "0.6442991", ...
0.6483526
18
The identifier of the resource.
def resource_id(self) -> pulumi.Input[str]:
    """The identifier of the resource."""
    return pulumi.get(self, "resource_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resourceid(self):", "def id(self):\n return sel...
[ "0.87842387", "0.87842387", "0.87842387", "0.8679565", "0.8428031", "0.8255889", "0.82200164", "0.7965054", "0.7965054", "0.7965054", "0.7965054", "0.7965054", "0.7965054", "0.7965054", "0.7965054", "0.7965054", "0.79283077", "0.79256773", "0.78568465", "0.7837528", "0.777672...
0.81783426
14
The result of the assessment
def status(self) -> pulumi.Input['AssessmentStatusArgs']:
    """The result of the assessment."""
    return pulumi.get(self, "status")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def result(self):\n return self['result']", "def results(self):\n\n\t\tresults = {'answer':42}\n\n\t\treturn results", "def result(self):\n\n print('Ergebnisse: -------------\\n'\n 'Richtige Antworten:{} \\n'\n 'Falsche Antworten:{} \\n'.format(self.answer_right, self.a...
[ "0.7455872", "0.7048662", "0.687895", "0.68733793", "0.6868782", "0.6851356", "0.68279386", "0.6774118", "0.67248684", "0.67248684", "0.67248684", "0.65798163", "0.65798163", "0.6516882", "0.6478505", "0.6459931", "0.645169", "0.64418316", "0.64348304", "0.6432639", "0.639982...
0.0
-1
Additional data regarding the assessment
def additional_data(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """Additional data regarding the assessment."""
    return pulumi.get(self, "additional_data")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def student_view_data(self, context=None):\n return {\n 'title': self.title,\n 'description': self.description,\n 'embed_code': self.embed_code,\n 'highres_url': self.highres_url,\n 'lowres_url': self.lowres_url,\n }", "def get_assessment(self)...
[ "0.63733923", "0.61233914", "0.60425943", "0.5986746", "0.59847146", "0.5970008", "0.596638", "0.59152776", "0.590361", "0.5881575", "0.58584493", "0.5775308", "0.56499285", "0.56463957", "0.5642021", "0.5598647", "0.55952555", "0.5585957", "0.5569665", "0.55653036", "0.55639...
0.0
-1
The Assessment Key - a unique key for the assessment type
def assessment_name(self) -> Optional[pulumi.Input[str]]:
    """The Assessment Key - unique key for the assessment type."""
    return pulumi.get(self, "assessment_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key(self):\n return str(self._id)", "def get_key_id(self):", "def generate_key(self):\n return str(uuid4())", "def key(self):\n\n return self.qualifiers.get(\"key\", False)", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return...
[ "0.6836664", "0.6800989", "0.67905533", "0.6716157", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", "0.66948956", ...
0.0
-1
Describes properties of an assessment metadata.
def metadata(self) -> Optional[pulumi.Input['SecurityAssessmentMetadataPropertiesArgs']]:
    """Describes properties of an assessment metadata."""
    return pulumi.get(self, "metadata")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metadata(self) -> global___SummaryMetadata:", "def metadata(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentMetadataPropertiesResponse']]:\n return pulumi.get(self, \"metadata\")", "def metadata(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentMetadataPropertiesResponse']]:\n ...
[ "0.64961946", "0.6462961", "0.6462961", "0.6290093", "0.6145428", "0.6084069", "0.6051594", "0.60471356", "0.5994781", "0.5933389", "0.59084606", "0.58996195", "0.5870387", "0.5848383", "0.5837634", "0.5826372", "0.5826372", "0.5818724", "0.58181715", "0.5814273", "0.5807516"...
0.64541817
3
Data regarding 3rd party partner integration
def partners_data(self) -> Optional[pulumi.Input['SecurityAssessmentPartnerDataArgs']]:
    """Data regarding 3rd party partner integration."""
    return pulumi.get(self, "partners_data")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def info():\n if g.party_id is None:\n # No party is configured for the current site.\n abort(404)\n\n party = party_service.get_party(g.party_id)\n\n return {\n 'party': party,\n }", "def get(self,\n partner_id):\n abort(501)", "def get(self,\n par...
[ "0.62221545", "0.56236756", "0.56236756", "0.5579682", "0.5457257", "0.5414618", "0.5382873", "0.5349557", "0.53429776", "0.5338125", "0.5333075", "0.53239155", "0.5318248", "0.5313936", "0.53106076", "0.5300438", "0.528713", "0.52863556", "0.52863556", "0.528309", "0.5242493...
0.5107046
28
Security assessment on a resource response format
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             additional_data: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             assessment_name: Optional[pulumi.Input[str]] = None,
             metadata: Optional[pulumi.Input[pulumi.InputType['SecurityAssessmentMetadataPropertiesArgs']]] = None,
             partners_data: Optional[pulumi.Input[pulumi.InputType['SecurityAssessmentPartnerDataArgs']]] = None,
             resource_details: Optional[pulumi.Input[Union[pulumi.InputType['AzureResourceDetailsArgs'], pulumi.InputType['OnPremiseResourceDetailsArgs'], pulumi.InputType['OnPremiseSqlResourceDetailsArgs']]]] = None,
             resource_id: Optional[pulumi.Input[str]] = None,
             status: Optional[pulumi.Input[pulumi.InputType['AssessmentStatusArgs']]] = None,
             __props__=None):
    """Security assessment on a resource - response format.

    Keyword-argument overload: create an Assessment resource from its
    individual properties.

    :param resource_name: The name of the resource.
    :param opts: Options for the resource.
    """
    ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_response(self, response):\n pass", "def get_secured():\n return jsonify({'isSecured': config.requires_auth()})", "def getSecurity(self):\n return self.client.get(self.name +\"/_security\").getBodyData()", "def ExtractSecurityMarksFromResponse(response, args):\n del args\n list_a...
[ "0.5803334", "0.57779896", "0.5722596", "0.55923194", "0.5541996", "0.5463912", "0.54613954", "0.54241943", "0.5412314", "0.53474534", "0.53294784", "0.5312035", "0.52927554", "0.5291069", "0.5280915", "0.52559435", "0.5234564", "0.52308244", "0.5222391", "0.5222391", "0.5207...
0.0
-1
Security assessment on a resource response format
def __init__(__self__,
             resource_name: str,
             args: AssessmentArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """Security assessment on a resource - response format.

    Args-object overload: create an Assessment resource from a bundled
    AssessmentArgs instance.

    :param resource_name: The name of the resource.
    :param args: The arguments to use to populate this resource's properties.
    :param opts: Options for the resource.
    """
    ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_response(self, response):\n pass", "def get_secured():\n return jsonify({'isSecured': config.requires_auth()})", "def getSecurity(self):\n return self.client.get(self.name +\"/_security\").getBodyData()", "def ExtractSecurityMarksFromResponse(response, args):\n del args\n list_a...
[ "0.5803334", "0.57779896", "0.5722596", "0.55923194", "0.5541996", "0.5463912", "0.54613954", "0.54241943", "0.5412314", "0.53474534", "0.53294784", "0.5312035", "0.52927554", "0.5291069", "0.5280915", "0.52559435", "0.5234564", "0.52308244", "0.5222391", "0.5222391", "0.5207...
0.0
-1
Get an existing Assessment resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':
    """Get an existing Assessment resource's state with the given name, id,
    and optional extra properties used to qualify the lookup.

    :param resource_name: The unique name of the resulting resource.
    :param id: The unique provider ID of the resource to lookup.
    :param opts: Options for the resource.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    __props__ = AssessmentArgs.__new__(AssessmentArgs)

    # All output properties start unset; the engine fills them on lookup.
    for prop in ("additional_data", "display_name", "links", "metadata",
                 "name", "partners_data", "resource_details", "status",
                 "type"):
        __props__.__dict__[prop] = None
    return Assessment(resource_name, opts=opts, __props__=__props__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"additional_data\"] = None\n __pr...
[ "0.6976486", "0.608855", "0.5846438", "0.5757699", "0.5648362", "0.55919707", "0.5582545", "0.55817664", "0.55264586", "0.55085826", "0.550386", "0.5496845", "0.54861027", "0.5467891", "0.54414856", "0.53757304", "0.53509825", "0.53232807", "0.5311685", "0.52735114", "0.52480...
0.6898669
1
Additional data regarding the assessment
def additional_data(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """Additional data regarding the assessment."""
    return pulumi.get(self, "additional_data")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def student_view_data(self, context=None):\n return {\n 'title': self.title,\n 'description': self.description,\n 'embed_code': self.embed_code,\n 'highres_url': self.highres_url,\n 'lowres_url': self.lowres_url,\n }", "def get_assessment(self)...
[ "0.63733923", "0.61233914", "0.60425943", "0.5986746", "0.59847146", "0.5970008", "0.596638", "0.59152776", "0.590361", "0.5881575", "0.58584493", "0.5775308", "0.56499285", "0.56463957", "0.5642021", "0.5598647", "0.55952555", "0.5585957", "0.5569665", "0.55653036", "0.55639...
0.5422631
31
User friendly display name of the assessment
def display_name(self) -> pulumi.Output[str]:
    """User friendly display name of the assessment."""
    return pulumi.get(self, "display_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assessment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"assessment_name\")", "def __str__(self):\n # Use 'Unknown' if the course instance does not have a term\n if self.course_instance.term:\n term = self.course_instance.term.verbose_name()\n e...
[ "0.76884186", "0.72017074", "0.71944445", "0.71944445", "0.71944445", "0.71944445", "0.71944445", "0.71944445", "0.71944445", "0.71944445", "0.71944445", "0.71944445", "0.71944445", "0.71944445", "0.6931914", "0.69249964", "0.6901365", "0.68354696", "0.6828654", "0.6815666", ...
0.6926838
21
Links relevant to the assessment
def links(self) -> pulumi.Output['outputs.AssessmentLinksResponse']:
    """Links relevant to the assessment."""
    return pulumi.get(self, "links")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLink(self):", "def exam_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"exam-detail\", args=[obj.id], request=request)", "def get_absolute_url(self):\n return reverse('trait_browser:source:studies:pk:detail', kwargs={'pk': self.pk})", "def href(self, r...
[ "0.6158211", "0.5980082", "0.5797147", "0.57784724", "0.5716922", "0.5695942", "0.56922674", "0.56786233", "0.5670207", "0.5577552", "0.5543779", "0.5527378", "0.5521695", "0.5521499", "0.5518003", "0.55149287", "0.551347", "0.5483872", "0.5457637", "0.5448972", "0.5433619", ...
0.65279543
0
Describes properties of an assessment metadata.
def metadata(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentMetadataPropertiesResponse']]:
    """Describes properties of an assessment metadata."""
    return pulumi.get(self, "metadata")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metadata(self) -> global___SummaryMetadata:", "def metadata(self) -> Optional[pulumi.Input['SecurityAssessmentMetadataPropertiesArgs']]:\n return pulumi.get(self, \"metadata\")", "def get_assessment_metadata(self):\n return Metadata(**settings.METADATA['assessment_id'])", "def describe(self...
[ "0.64961946", "0.64541817", "0.6290093", "0.6145428", "0.6084069", "0.6051594", "0.60471356", "0.5994781", "0.5933389", "0.59084606", "0.58996195", "0.5870387", "0.5848383", "0.5837634", "0.5826372", "0.5826372", "0.5818724", "0.58181715", "0.5814273", "0.5807516", "0.5801381...
0.6462961
1
Data regarding 3rd party partner integration
def partners_data(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentPartnerDataResponse']]:
    """Data regarding 3rd party partner integration."""
    return pulumi.get(self, "partners_data")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def info():\n if g.party_id is None:\n # No party is configured for the current site.\n abort(404)\n\n party = party_service.get_party(g.party_id)\n\n return {\n 'party': party,\n }", "def get(self,\n partner_id):\n abort(501)", "def get(self,\n par...
[ "0.62221545", "0.56236756", "0.56236756", "0.5579682", "0.5457257", "0.5414618", "0.5382873", "0.5349557", "0.53429776", "0.5338125", "0.5333075", "0.53239155", "0.5318248", "0.5313936", "0.53106076", "0.5300438", "0.528713", "0.528309", "0.5242493", "0.52211165", "0.52005583...
0.52863556
17
Details of the resource that was assessed
def resource_details(self) -> pulumi.Output[Any]:
    """Details of the resource that was assessed."""
    return pulumi.get(self, "resource_details")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_resource_details (self):\n return (f\"[Title:\\\"{self.get_title()}\\\"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]\")", "def resource(self):\n return str(self._resource)", "def resource(self):\n return self._resource", "def resource(...
[ "0.76440537", "0.7001793", "0.6875115", "0.6875115", "0.6875115", "0.6875115", "0.6875115", "0.6875115", "0.6875115", "0.686545", "0.6756598", "0.6750164", "0.66482615", "0.6627125", "0.6553682", "0.6521818", "0.6483526", "0.64652663", "0.6450048", "0.6442991", "0.63966125", ...
0.7746103
0
The result of the assessment
def status(self) -> pulumi.Output['outputs.AssessmentStatusResponseResponse']:
    """The result of the assessment."""
    return pulumi.get(self, "status")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def result(self):\n return self['result']", "def results(self):\n\n\t\tresults = {'answer':42}\n\n\t\treturn results", "def result(self):\n\n print('Ergebnisse: -------------\\n'\n 'Richtige Antworten:{} \\n'\n 'Falsche Antworten:{} \\n'.format(self.answer_right, self.a...
[ "0.7455872", "0.7048662", "0.687895", "0.68733793", "0.6868782", "0.6851356", "0.68279386", "0.6774118", "0.67248684", "0.67248684", "0.67248684", "0.65798163", "0.65798163", "0.6516882", "0.6478505", "0.6459931", "0.645169", "0.64418316", "0.64348304", "0.6432639", "0.639982...
0.6113301
36
Test get_type_for_key_path with Simple Key Path
def test_get_type_for_key_path_simple_path(test_schema):
    """Test get_type_for_key_path with a simple (top-level) key path."""
    assert get_type_for_key_path(test_schema, "Age") == "integer"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\n )", "def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\")...
[ "0.7844626", "0.75669205", "0.74641645", "0.7052028", "0.65783304", "0.65595055", "0.65575176", "0.62750286", "0.59552884", "0.59359103", "0.5933874", "0.59200144", "0.58311874", "0.5820171", "0.5779001", "0.5731968", "0.5724418", "0.5718408", "0.5717397", "0.5695019", "0.567...
0.84244055
0
Test get_type_for_key_path with key path of one level deep
def test_get_type_for_key_path_depth_one_level(test_schema):
    """Test get_type_for_key_path with a key path one level deep."""
    assert (
        get_type_for_key_path(test_schema, "EmploymentInformation.OriginalHireDate")
        == "string"
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\n )", "def test_get_type_for_key_path_simple_path(test_schema):\n assert get_type_for_key_path(test_schema, \"Age\") == \"int...
[ "0.81315565", "0.7790559", "0.73348916", "0.6510662", "0.63362354", "0.6123883", "0.5945233", "0.59033793", "0.5871133", "0.58505154", "0.58181745", "0.579614", "0.5716585", "0.56624275", "0.5648696", "0.56418836", "0.5635143", "0.5609441", "0.5574678", "0.55583984", "0.55554...
0.78562295
1
Test get_type_for_key_path with multi level key path
def test_get_type_for_key_path_multi_level(test_schema):
    """Test get_type_for_key_path with a multi-level key path."""
    assert (
        get_type_for_key_path(test_schema, "EmploymentInformation.Beneficiary.Name")
        == "string"
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_type_for_key_path_simple_path(test_schema):\n assert get_type_for_key_path(test_schema, \"Age\") == \"integer\"", "def test_get_type_for_key_path_depth_one_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.OriginalHireDate\")\n == \"strin...
[ "0.78702086", "0.77100694", "0.718522", "0.6605864", "0.6282174", "0.62626797", "0.5969291", "0.59365714", "0.59263676", "0.5847719", "0.5822397", "0.5740006", "0.5724306", "0.57241476", "0.56666523", "0.56429887", "0.563662", "0.5624624", "0.56129014", "0.55840045", "0.55359...
0.8355806
0
Test get_type_for_key_path with invalid key path
def test_get_type_for_key_path_invalid_key_path(test_schema):
    """Test get_type_for_key_path with an invalid key path (returns None)."""
    assert get_type_for_key_path(test_schema, "foo.bar") == None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_type_for_key_path_simple_path(test_schema):\n assert get_type_for_key_path(test_schema, \"Age\") == \"integer\"", "def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\...
[ "0.7941828", "0.74922115", "0.73346525", "0.67035246", "0.6578848", "0.64337885", "0.64240384", "0.64203644", "0.6280829", "0.6259001", "0.6171811", "0.61548215", "0.61433345", "0.61050516", "0.6074959", "0.60654056", "0.6059521", "0.6050425", "0.60475475", "0.6045783", "0.60...
0.8730529
0
Create a mesh of points to plot in
def make_meshgrid(x, y, h=.02):
    """Create a mesh of points to plot in.

    The grid spans [min - 1, max + 1) along each axis with step ``h``.

    :param x: array of x coordinates
    :param y: array of y coordinates
    :param h: grid step size
    :return: (xx, yy) coordinate matrices from numpy.meshgrid
    """
    pad = 1
    xs = np.arange(x.min() - pad, x.max() + pad, h)
    ys = np.arange(y.min() - pad, y.max() + pad, h)
    xx, yy = np.meshgrid(xs, ys)
    return xx, yy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_mesh(\n self,\n lims_x: array_like = (-1, 1),\n lims_y: array_like = (-1, 1),\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n a, b, c, d = self.cartesian()\n x_center, y_center = self.point[:2]\n\n values_x = x_center + lims_x\n values_y = y_center + l...
[ "0.7197377", "0.6756", "0.6733346", "0.6643686", "0.6442712", "0.6364836", "0.63441443", "0.6343205", "0.63342935", "0.62988", "0.627023", "0.623843", "0.62137896", "0.61971325", "0.6183295", "0.6164846", "0.6139129", "0.61126757", "0.61126757", "0.6105098", "0.6105098", "0...
0.6036985
24
Plot the decision boundaries for a classifier.
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.

    :param ax: matplotlib axes object
    :param clf: a classifier exposing ``predict``
    :param xx: meshgrid ndarray of x coordinates
    :param yy: meshgrid ndarray of y coordinates
    :param params: extra keyword arguments forwarded to ``contourf``
    """
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    predictions = clf.predict(grid_points)
    Z = predictions.reshape(xx.shape)
    return ax.contourf(xx, yy, Z, **params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_decision_regions(X, y, classifier, resolution=0.02):\n #setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n #plot the decision surface\n #just find the ...
[ "0.72929466", "0.7281941", "0.7130415", "0.70138526", "0.70124936", "0.69807297", "0.69738376", "0.6969092", "0.6924797", "0.68875146", "0.6852066", "0.6791433", "0.66879207", "0.66742694", "0.66299", "0.6457393", "0.628703", "0.62751436", "0.62345755", "0.61874735", "0.61633...
0.5346783
71
Guts for `~trigger.utils.url.parse_url`. Based on Kombu's ``kombu.utils.url``.
def _parse_url(url): parts = urlparse(url) scheme = parts.scheme port = parts.port or None hostname = parts.hostname path = parts.path or '' virtual_host = path[1:] if path and path[0] == '/' else path return (scheme, unquote(hostname or '') or None, port, unquote(parts.username or '') or None, unquote(parts.password or '') or None, unquote(path or '') or None, unquote(virtual_host or '') or None, unquote(parts.query or '') or None, dict(dict(parse_qsl(parts.query))))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ParseUrl(url):\n return urlparse(url)", "def _parse(url):\n url = url.strip()\n parsed = urlparse(url)\n return _parsed_url_args(parsed)", "def urlparse(url):\n\tunquote_url=urllib.parse.unquote(url)\n\treturn unquote_url", "def _parse_url(self, url):\n url_prefix = self.URL_PREFIX\n ...
[ "0.8100496", "0.74347377", "0.7177285", "0.7094402", "0.69739527", "0.69297016", "0.6924397", "0.68131274", "0.67186797", "0.6689108", "0.66737133", "0.6653376", "0.6621237", "0.6607721", "0.6598066", "0.65887487", "0.652699", "0.65201724", "0.65014964", "0.64695275", "0.6461...
0.7226152
2
Given a ``url`` returns, a dict of its constituent parts. Based on Kombu's ``kombu.utils.url``.
def parse_url(url):
    """Given a ``url`` returns a dict of its constituent parts. Based on
    Kombu's ``kombu.utils.url``.

    Query-string parameters are merged into the returned dict as extra
    keyword entries.
    """
    (scheme, hostname, port, username, password,
     path, virtual_host, query, extra) = _parse_url(url)
    return dict(scheme=scheme, hostname=hostname, port=port,
                username=username, password=password, path=path,
                virtual_host=virtual_host, query=query, **extra)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_url(url):\n parsed = urlparse(url)\n return {\n \"scheme\": parsed.scheme,\n \"netloc\": parsed.netloc,\n \"path\": parsed.path,\n \"qs\": parse_qs(parsed.query),\n }", "def parse_url(url):\n results = NotifyBase.parse_url(url)\n if not results:\n ...
[ "0.7244224", "0.70891213", "0.68429947", "0.6841017", "0.66689384", "0.6423777", "0.6402168", "0.6328875", "0.6304817", "0.62504447", "0.6184663", "0.6176373", "0.6129636", "0.6107103", "0.61060536", "0.60760194", "0.6045592", "0.6023665", "0.6001749", "0.59867436", "0.597842...
0.682851
4
Test if game is to be won/lost by player.
def test_case_if_row_is_about_to_be_foobar(self, mock_game): test_game = Game(3, "playerX", "playerO") test_game.game_id = 1002 test_game.board_size = 3 test_game.your_move = "X" test_game.board_blob = json.dumps(['X', '', 'O', 'X', '', '', 'X', 'X', '']) assert(test_game.is_row_in_danger(0, "X") is False) assert(test_game.is_row_in_danger(1, "X") == [4, 5]) assert(test_game.is_row_in_danger(2, "X") == [8])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_won(board, player):\r\n return False", "def is_game_won(self):\n return True", "def has_won(board, player):\n return False", "def check_game(self):\n gameOver = None\n if self.turn > 4:\n gameOver = self.check_x_won()\n if gameOver is True:\n ...
[ "0.8094866", "0.8090733", "0.8032095", "0.8010544", "0.79272014", "0.7739902", "0.7669931", "0.75953156", "0.75601757", "0.7536703", "0.7533477", "0.7526654", "0.7492473", "0.7458491", "0.7454935", "0.7428993", "0.73916984", "0.7306342", "0.72584236", "0.7249623", "0.7238585"...
0.0
-1
Evaluate and apply formatting on template, apply any art if provided. Any additional parameters are passed as extra variables to the template. The extra variables have priority when there's conflicting variable names.
def run(self, template: str, art: Optional[str] = None, **kwargs: Any) -> str: variables = self.__dict__ variables.update(kwargs) template = CustomFormats().format(template, **variables) if art: art = art.format(nfo=template) template = art for m in re.finditer(r"<\?([01])\?([\D\d]*?)\?>", template): # TODO: This if check is quite yucky, look into alternative options. # Ideally a custom format spec would be great. template = template.replace( m.group(0), m.group(2) if int(m.group(1)) else "" ) template = "\n".join(map(str.rstrip, template.splitlines(keepends=False))) return template
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render(template, variables={}):\r\n\treturn prettify( parse(template).render(dict(variables.items())) )", "def format_template(template, *args):\n return textwrap.dedent(template % args).strip()", "def formatEval(self, template, attrs, scale=1, noScale=None):\n # Boat width not stored, so calcula...
[ "0.5875008", "0.58742577", "0.58426213", "0.5590589", "0.5554982", "0.5440268", "0.5436101", "0.53913677", "0.5359046", "0.5331052", "0.530326", "0.5283194", "0.5190745", "0.51852167", "0.5137004", "0.51337534", "0.5114587", "0.50861675", "0.5056899", "0.5040804", "0.5000452"...
0.6679136
0
Get an IMDB ID from either the media's global tags, or the config. Since IMDB IDs are required for this project, it will bug the user for one interactively if not found.
def get_imdb_id(self, imdb_id: Any) -> str: if not imdb_id: general_track = self.media_info.general_tracks[0].to_data() imdb_id = general_track.get("imdb") if not imdb_id: print("No IMDB ID was provided but is required...") while not imdb_id or not isinstance(imdb_id, str): user_id = input("IMDB ID (e.g., 'tt0487831'): ") if not self.IMDB_ID_T.match(user_id): print(f"The provided IMDB ID {user_id!r} is not valid...") print("Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').") else: imdb_id = user_id return imdb_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def alternative_media_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"alternative_media_id\")", "def imdb_id(title):\n pass", "def _get_id(mf, url=None):\n\n\tprops = mf['properties']\n\n\tif 'uid' in props:\n\t\treturn props['uid'][0]\n\telif 'url' in props:\n\t\treturn props['...
[ "0.5761104", "0.5743748", "0.559037", "0.5428841", "0.5407892", "0.5339938", "0.5334296", "0.53217006", "0.5253112", "0.52254945", "0.52251637", "0.5214888", "0.5210509", "0.5178844", "0.5142981", "0.5113672", "0.5091264", "0.5082251", "0.5070948", "0.5068511", "0.5062682", ...
0.64897996
0
Get a TMDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid.
def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]: if not tmdb_id: general_track = self.media_info.general_tracks[0].to_data() tmdb_id = general_track.get("tmdb") if not tmdb_id: print("Warning: No TMDB ID was provided...") return None if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str): print(f"The provided TMDB ID {tmdb_id!r} is not valid...") print("Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').") raise ValueError("Invalid TMDB ID") return tmdb_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]:\n if not tvdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tvdb_id = general_track.get(\"tvdb\")\n if not tvdb_id:\n print(\"Warning: No TVDB ID was provided...\")\n return None\n ...
[ "0.64487255", "0.5630184", "0.5409801", "0.54041606", "0.54024845", "0.53911626", "0.5343543", "0.53333956", "0.5306317", "0.5306317", "0.5306317", "0.5306317", "0.5306317", "0.5306317", "0.52967757", "0.52887344", "0.52816087", "0.5266011", "0.52260435", "0.522252", "0.52078...
0.64089936
1
Get a TVDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid.
def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]: if not tvdb_id: general_track = self.media_info.general_tracks[0].to_data() tvdb_id = general_track.get("tvdb") if not tvdb_id: print("Warning: No TVDB ID was provided...") return None if isinstance(tvdb_id, int): tvdb_id = str(tvdb_id) if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str): print(f"The provided TVDB ID {tvdb_id!r} is not valid...") print("Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').") raise ValueError("Invalid TVDB ID") return int(tvdb_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]:\n if not tmdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tmdb_id = general_track.get(\"tmdb\")\n if not tmdb_id:\n print(\"Warning: No TMDB ID was provided...\")\n return None\n ...
[ "0.60287786", "0.5454019", "0.5437956", "0.5360313", "0.5346196", "0.5333645", "0.5321893", "0.52784383", "0.5270552", "0.5240752", "0.5226761", "0.5213634", "0.5184168", "0.5155326", "0.5155326", "0.5155326", "0.5155326", "0.5155326", "0.5155326", "0.51372594", "0.5098536", ...
0.694675
0
Scrape Title Name and Year (including e.g. 2019) from IMDB
def get_title_name_year(self) -> Tuple[str, str]: r = self.session.get(f"https://www.imdb.com/title/{self.imdb}") if r.status_code != 200: raise ValueError(f"An unexpected error occurred getting IMDB Title Page [{r.status_code}]") imdb_page = html.unescape(r.text) imdb_title = re.search( # testing ground: https://regex101.com/r/bEoEDn/1 r"<title>(?P<name>.+) \(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)" r"(?P<year>(\d{4})(|– |–\d{4})))\) - IMDb</title>", imdb_page ) if not imdb_title: raise ValueError(f"Could not scrape Movie Title or Year for {self.imdb}...") return imdb_title.group("name").strip(), imdb_title.group("year").strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scrape_movie_page(dom):\n # to save the information\n info = []\n\n # find the information block needed\n header = dom.find(\"div\", \"title_wrapper\")\n\n # find the title and strip the string\n name_dom = header.h1.get_text().encode(\"utf-8\")\n name = str(name_dom)[2:-16]\n info.appe...
[ "0.6832312", "0.6682347", "0.66435677", "0.6387017", "0.637413", "0.63639027", "0.633812", "0.6296725", "0.61997265", "0.6172732", "0.6140767", "0.61179507", "0.60745186", "0.60595584", "0.59533656", "0.59527224", "0.5951921", "0.5943582", "0.5937595", "0.59366393", "0.592958...
0.7373919
0
Calculate total episode count based on neighbouring sameextension files.
def get_tv_episodes(self) -> int: return len(glob.glob(os.path.join( os.path.dirname(self.file), f"*{os.path.splitext(self.file)[-1]}" )))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_episodes(self):\n raise NotImplementedError", "def return_episode_num(name):\n return int(name.split(\".\")[0].split(\"ep_\")[1]) # Use split to return only the episode number needed to sort the files in increasing order", "def _get_total_games(self) -> int:\n files = get_tfr_filenames(...
[ "0.58617145", "0.5788146", "0.5688501", "0.5621688", "0.551865", "0.5483126", "0.54120064", "0.54120064", "0.5410899", "0.5262325", "0.52515316", "0.5230109", "0.52180594", "0.52164894", "0.5211398", "0.52089113", "0.52078176", "0.5205271", "0.5205262", "0.5201948", "0.518634...
0.6543224
0
Retrieve the release name based on the file used during MediaInfo. If a season was specified, but an episode number was not, it presumes the release is a Pack. Hence when pack, it uses the parent folder's name as the release name.
def get_release_name(self) -> str: if self.season is not None and self.episode is None: return os.path.basename(os.path.dirname(self.file)) return os.path.splitext(os.path.basename(self.file))[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getApplicationReleaseName(self) -> unicode:\n ...", "def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'ya...
[ "0.59712", "0.59365255", "0.58621913", "0.57642496", "0.5731158", "0.5726277", "0.57143974", "0.5686276", "0.5618006", "0.557107", "0.5563853", "0.5559816", "0.5543877", "0.55142844", "0.55000263", "0.5495673", "0.5444121", "0.5430501", "0.5416234", "0.54096", "0.5329431", ...
0.81937546
0
Get a wide banner image from fanart.tv. Currently restricts banners to Englishonly.
def get_banner_image(self, tvdb_id: int) -> Optional[str]: if not tvdb_id: return None if not self.fanart_api_key: raise ValueError("Need Fanart.tv api key for TV titles!") r = self.session.get(f"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}") if r.status_code == 404: return None res = r.json() error = res.get("error message") if error: if error == "Not found": return None raise ValueError(f"An unexpected error occurred while calling Fanart.tv, {res}") banner = next(( x["url"] for x in (res.get("tvbanner") or []) if x["lang"] == sorted(self.audio, key=lambda x: x.streamorder)[0].language ), None) return banner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_banner(self, width=300, height=85):\n img_path = IMG_PATH + os.sep + CARD_BANNER\n banner_img = Image.open(img_path)\n banner_img = banner_img.resize((width, height))\n return banner_img", "def banner_wrapper(banner_url):\n # so simple\n return '{url}<img src=...
[ "0.63562655", "0.5907053", "0.58398026", "0.56723255", "0.55253977", "0.55232245", "0.55212253", "0.5516398", "0.532808", "0.5248593", "0.52476937", "0.52444804", "0.5240686", "0.51916087", "0.51215434", "0.5034326", "0.5020774", "0.5014858", "0.49858487", "0.4975781", "0.496...
0.6869307
0
Return a list of a brief subtitle overview persubtitle. e.g. English, Forced, SubRip (SRT) English, SubRip (SRT) English, SDH, SubRip (SRT) Spanish, Latin American (SDH), SubRip (SRT) The bit of text between the Language and the Subtitle format is the Track Title. It can be of any format, but it is recommended to be used as shown above. It will be returned as a list of strings with the ` ` already prepended to each entry.
def get_subtitle_print(subs: List[Track]) -> List[str]: data = [] if not subs: data.append("--") for sub in subs: line_items = [] # following sub.title tree checks and supports three different language and title scenarios # The second scenario is the recommended option to choose if you are open to choosing any # The third scenario should be used if you have nothing unique to state about the track # | Language | Track Title | Output | # | ------------ | ----------------------------- | --------------------------------------------- | # | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) | # | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) | # | es / Spanish | None | - Spanish, SubRip (SRT) | language = pycountry.languages.get(alpha_2=sub.language).name if sub.title: if language.lower() in sub.title.lower(): line_items.append(sub.title) else: line_items.append(f"{language}, {sub.title}") else: line_items.append(language) line_items.append(sub.format.replace("UTF-8", "SubRip (SRT)")) line = "- " + ", ".join(line_items) data += [ (" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(line, 64)) ] return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_title(self) -> list:\n scanning = False # start of a title is found, this may be the second of later part of that.\n ret = [] # to return\n temp = [] # deal with mutiple line titles.\n for page in self.pdf.pages:\n text = page.extract_text()\n # it's p...
[ "0.6559989", "0.626816", "0.61477166", "0.59452933", "0.5862041", "0.58259624", "0.57575667", "0.5716959", "0.57007366", "0.56698", "0.566014", "0.5631774", "0.56170785", "0.5601238", "0.55479985", "0.55277705", "0.55247784", "0.55187654", "0.5466009", "0.5463526", "0.5434021...
0.73892254
0
The mins method returns the lower bounds of the action spaces' parameters.
def mins(self) -> Tensor: return self._ranges[:, 0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mins(self):\n return self._mins", "def bounds(self) -> Tensor:\n return torch.cat([self.mins, self.mins + self.ranges], dim=-2)", "def mins(self):\n return self.intervals[:, 0]", "def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n ...
[ "0.65231216", "0.63490486", "0.6329805", "0.6140933", "0.604615", "0.5979384", "0.59507585", "0.5943575", "0.5881969", "0.5867249", "0.58545846", "0.5735611", "0.5735611", "0.5695982", "0.568224", "0.5630734", "0.56240106", "0.55890894", "0.556751", "0.55503213", "0.55368114"...
0.6706375
0
The maxs method returns the upper bounds of the action spaces' parameters.
def maxs(self) -> Tensor: return self._ranges[:, 1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maxs(self):\n return self._maxs", "def get_parameters_max(self):\n maxValues = numpy.zeros(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n maxValues[i] = p.get_max_value()\n i += 1\n return maxValues", "def get_bounds(self):\n ...
[ "0.6918863", "0.66844064", "0.6673597", "0.6575035", "0.6552709", "0.6513679", "0.6467321", "0.6414459", "0.62708175", "0.62123346", "0.620865", "0.62085176", "0.62085176", "0.6191484", "0.6156986", "0.6142866", "0.6112747", "0.6100505", "0.6096278", "0.60705197", "0.60639876...
0.6770987
1
The _generate_iterator method creates an iterator which runs over all possible parameter combinations
def _generate_iterator(self) -> Iterable: params: List[Tensor] = [] for angle_range in self._ranges: lin_space: Tensor = linspace(angle_range[0], angle_range[1], steps=self._num_steps) params.append(lin_space) power: int dims: int for i in range(0, self._num_params): power = len(self._ranges) - 1 - i dims = i params[i] = params[i].repeat_interleave(self._num_steps ** power) params[i] = params[i].broadcast_to((self._num_steps ** dims, self._num_steps ** (power + 1))).flatten() return zip(*params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n for p in self.param_grid:\n # Always sort the keys of a dictionary, for reproducibility\n items = sorted(p.items())\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in produc...
[ "0.7333558", "0.6785712", "0.67566705", "0.67345154", "0.66711015", "0.66702646", "0.66659456", "0.64822334", "0.6465031", "0.64270854", "0.64160955", "0.6409563", "0.638886", "0.638886", "0.638886", "0.638886", "0.6387711", "0.6350889", "0.6341186", "0.6333276", "0.6317058",...
0.73833257
0
computes and returns a complex rotation matrix given by the angles in params.
def operator(self, params: Tensor) -> Tensor: theta, phi = params # calculate entries a: Tensor = exp(1j * phi) * cos(theta / 2) b: Tensor = sin(theta / 2) c: Tensor = -b d: Tensor = exp(-1j * phi) * cos(theta / 2) # construct the rows of the rotation matrix r1: Tensor = cat((a.view(1), b.view(1))) r2: Tensor = cat((c.view(1), d.view(1))) # build and return the rotation matrix rot: Tensor = cat((r1, r2)).view(2, 2) return rot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotation_matrix(theta=0, phi=0, psi=0, units='deg'):\n\n rpy = Rpy(theta,units)\n rmx = Rmx(phi, units)\n rpz = Rpz(psi, units)\n\n return np.matmul(rpy, np.matmul(rmx, rpz))", "def rotation_matrix3(angle_x=0, angle_y=0, angle_z=0):\n if angle_x != 0:\n c, s = cos(angle_x), sin(angle_x)...
[ "0.6859258", "0.68380105", "0.6828468", "0.67929703", "0.6778919", "0.6778919", "0.6726848", "0.6723164", "0.67022794", "0.67022794", "0.67022794", "0.66824156", "0.6651829", "0.66406155", "0.6638365", "0.66007775", "0.6590055", "0.6520805", "0.6512235", "0.6512235", "0.64985...
0.6610632
15
Implementation of the triplet loss as defined by formula (3)
def triplet_loss(y_true, y_pred, alpha=0.2): anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2] # Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1 pos_dist = K.sum(K.square(anchor - positive), axis=-1) # Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1 neg_dist = K.sum(K.square(anchor - negative), axis=-1) # Step 3: subtract the two previous distances and add alpha. basic_loss = pos_dist - neg_dist + alpha # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples. loss = K.sum(K.maximum(basic_loss, 0)) return loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def triplet_loss(y_true, y_pred):\n [a,p,n] = tf.unstack(y_pred, num=3)\n pos_dist = tf.reduce_sum((a - p)**2, axis=-1)\n neg_dist = tf.reduce_sum((a - n)**2, axis=-1)\n basic_loss = pos_dist - neg_dist + 0.1\n loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0)) \n return loss", "de...
[ "0.7934357", "0.78243315", "0.76634", "0.76594365", "0.75780183", "0.75132746", "0.74581045", "0.7425377", "0.7383122", "0.7329623", "0.731566", "0.7292533", "0.7243651", "0.7238686", "0.7167928", "0.714028", "0.7125176", "0.7010623", "0.69959813", "0.6995111", "0.69010466", ...
0.71021456
17
Function to rotate one vector to another, inspired by vrrotvec.m in MATLAB
def vrrotvec(a,b): a = normalize(a) b = normalize(b) ax = normalize(np.cross(a,b)) angle = np.arccos(np.minimum(np.dot(a,b),[1])) if not np.any(ax): absa = np.abs(a) mind = np.argmin(absa) c = np.zeros((1,3)) c[mind] = 0 ax = normalize(np.cross(a,c)) r = np.concatenate((ax,angle)) return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svecRotate(v, T):\n \n return svec(Rotate(smat(v), T))", "def _rot(theta, vec):\n\n rmat = scipy.array([[scipy.cos(theta), -1*scipy.sin(theta)],\n [scipy.sin(theta), scipy.cos(theta)]]) \n return scipy.dot(rmat,vec)", "def rotate_vectors(q, vec):\n rot_vec = []\n fo...
[ "0.73906195", "0.7337561", "0.72559744", "0.70578516", "0.702117", "0.6936926", "0.69332725", "0.6840454", "0.68362135", "0.6819142", "0.6817099", "0.67670673", "0.6754099", "0.6747304", "0.67367476", "0.6716166", "0.6703668", "0.6696891", "0.66868734", "0.6665173", "0.660386...
0.7442148
0
Convert the axisangle representation to the matrix representation of the rotation
def vrrotvec2mat(r): s = np.sin(r[3]) c = np.cos(r[3]) t = 1 - c n = normalize(r[0:3]) x = n[0] y = n[1] z = n[2] m = np.array( [[t*x*x + c, t*x*y - s*z, t*x*z + s*y], [t*x*y + s*z, t*y*y + c, t*y*z - s*x], [t*x*z - s*y, t*y*z + s*x, t*z*z + c]] ) return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def axis2rotmat(axis):\n return quat2rotmat(axis2quat(axis))", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def angle_to_rotation_matrix(an...
[ "0.81078196", "0.78859186", "0.74368423", "0.7374491", "0.7312507", "0.7310099", "0.7273846", "0.7225561", "0.71868175", "0.71868175", "0.70643413", "0.70557725", "0.70138526", "0.7002238", "0.6987983", "0.6980551", "0.69750994", "0.6967742", "0.69532573", "0.693683", "0.6926...
0.0
-1
Sort the buses reversed by their period, having tagged them with their position in the sequence, which is their c value. >>> list(prep_input(EXAMPLE_BUSES)) [(59, 4), (31, 6), (19, 7), (13, 1), (7, 0)]
def prep_input(buses): return sorted([(bus, offset) for offset, bus in enumerate(buses) if bus], reverse=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_bc_freqs(bc_freqs):\r\n\r\n bcs_list = []\r\n for curr_key in bc_freqs.keys():\r\n bcs_list.append((curr_key, int(bc_freqs[curr_key])))\r\n\r\n bcs_list = sorted(bcs_list, key=itemgetter(1), reverse=True)\r\n\r\n sorted_bcs = []\r\n for curr_bc in bcs_list:\r\n sorted_bcs.a...
[ "0.5366377", "0.5326399", "0.532053", "0.5228897", "0.5217887", "0.5176579", "0.5169902", "0.51473904", "0.513209", "0.5101623", "0.50995326", "0.5060153", "0.50267196", "0.5010616", "0.5007883", "0.4982695", "0.49244604", "0.49026328", "0.49025616", "0.4894752", "0.48676687"...
0.6246531
0
period of combined signal is lcm of the periods of its components >>> lcm(3, 9) 9 >>> lcm(4, 9) 36
def lcm(x, y): return x*y//gcd(x,y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lcm(a, b):\n\treturn a * b // gcm(a, b)", "def lcm(a, b):\r\n return a * b / fr.gcd(a, b)", "def lcm(a, b):\r\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def lcm(a, b):\n return a * b // gcd(a, b)", "def l...
[ "0.71418875", "0.69032246", "0.6865901", "0.6818754", "0.6818754", "0.6818754", "0.6818754", "0.6818754", "0.6818754", "0.6818754", "0.6818754", "0.6818754", "0.67949474", "0.67771405", "0.67771405", "0.6775764", "0.67721605", "0.67721605", "0.67721605", "0.665062", "0.660152...
0.6627561
20
>>> combine_signals((5,2), (3,1)) (15, 7) >>> combine_signals((3,1), (2,0)) (6, 4) >>> combine_signals((13,1),(12,0)) (156, 144)
def combine_signals(longer,shorter): T_0, c_0 = longer T_1, c_1 = shorter # Period is the lcm of the provided periods T_result = lcm(T_0,T_1) # Determine phase by searching soutions of longer that fall between the # start position and start + T_result for i in range(T_0-c_0,T_result+c_0,T_0): v0 = (i + c_0) % T_0 v1 = (i + c_1) % T_1 if not( v0 or v1): return (T_result,T_result-i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine_signals(series1: pd.Series, series2: pd.Series) -> pd.Series:\n return ((np.sign(series1) == np.sign(series2)) * series1).astype(int, copy=False)", "def solve_buses(prepared_buses):\n T, c = functools.reduce(combine_signals, prepared_buses)\n return T - c", "def pick_signals(processor, sou...
[ "0.60273904", "0.57812446", "0.5720774", "0.5197646", "0.5101901", "0.50511295", "0.49429286", "0.49002105", "0.48530227", "0.48525456", "0.48228434", "0.48220006", "0.48124447", "0.47445247", "0.4741653", "0.47265753", "0.46585917", "0.4639718", "0.46313864", "0.46211886", "...
0.5614806
3
Reduce a bunch of periodic signals to a single signal. The value of x that answers the puzzle is the first place ( c + x ) % T = 0, that is to say, c + x = T, or x = Tc. >>> solve_buses(prep_input(EXAMPLE_BUSES)) 1068781
def solve_buses(prepared_buses): T, c = functools.reduce(combine_signals, prepared_buses) return T - c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solution2(inp):\n inp = get_lines(inp)\n notes = inp[1].split(\",\")\n\n offsets = {}\n for i, bus in enumerate(notes):\n if bus == 'x':\n continue\n bus = int(bus)\n offsets[bus] = i\n buses = set(offsets)\n old_buses = buses.copy()\n\n def search(bus, offs...
[ "0.5621422", "0.5476572", "0.5280086", "0.5161614", "0.5074294", "0.49908358", "0.49579656", "0.48839134", "0.48619267", "0.48579392", "0.48389107", "0.48320952", "0.4826791", "0.48213837", "0.48160282", "0.4811936", "0.48002857", "0.4795177", "0.47753403", "0.4768853", "0.47...
0.76751333
0
Generator over all subclasses of a given class, in depth first order.
def itersubclasses(cls, _seen=None): if not isinstance(cls, type): raise TypeError('itersubclasses must be called with ' 'new-style classes, not %.100r' % cls) if _seen is None: _seen = set() try: subs = cls.__subclasses__() except TypeError: # fails only when cls is type subs = cls.__subclasses__(cls) for sub in subs: if sub not in _seen: _seen.add(sub) yield sub for sub in itersubclasses(sub, _seen): yield sub
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_subclasses(cls):\n for subclass in cls.__subclasses__():\n yield subclass\n for subc in all_subclasses(subclass):\n yield subc", "def class_hierarchy(clslist):\n for cls in clslist:\n subclass_list = cls.__subclasses__()\n if subclass_list:\n ...
[ "0.7962573", "0.79427534", "0.7785052", "0.75617856", "0.7538503", "0.75220305", "0.7414212", "0.7337148", "0.7327652", "0.7236801", "0.70275396", "0.7026855", "0.7007763", "0.6963463", "0.6906831", "0.688978", "0.688137", "0.6830467", "0.68002534", "0.67773235", "0.6738663",...
0.7340427
7
Might be a useful helper
def _get_belt(self, new_score): for score in reversed(scores): if new_score >= score: return BELTS[score].capitalize() return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def regular(self):", "def sth():", "def substantiate():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def exo2():", "def support(self):", "def __call__(self) -> None:", "def common(self):", "def fn():", "def func():", "def _prepare(sel...
[ "0.61169916", "0.5881199", "0.5851093", "0.5847512", "0.5847512", "0.5847512", "0.5847512", "0.5847512", "0.5819249", "0.5811447", "0.579026", "0.5758938", "0.57119006", "0.56060624", "0.55889446", "0.55889446", "0.5582659", "0.5582659", "0.5576801", "0.55566376", "0.55192375...
0.0
-1
Method counting photos and creatig list of subfolders with images.
def my_root_listdir(root_dir): root_listdir = [ images_dir for images_dir in os.listdir(root_dir) if not any( characters in images_dir for characters in [".", "test", "train", "valid"] ) ] summ = 0 for images_dir in root_listdir: summ += len(os.listdir(root_dir + "/" + images_dir)) / 2 - 2 print("Sum of images in directories: ", int(summ)) return root_listdir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appendpics(pathofimg, w_sub, h_sub, step):\n num = 0\n dirlist = []\n images = [] # images in each folder\n for root, dirs, fileswer in os.walk(pathofimg):\n if len(dirs)!= 0:\n for dir in dirs:\n dirlist.append(dir)\n for rooert, dirwerwes, files in...
[ "0.69187784", "0.6491537", "0.6410648", "0.63387346", "0.63301575", "0.60764897", "0.6062333", "0.6053507", "0.60286975", "0.6016352", "0.59886175", "0.58583134", "0.58411384", "0.5840281", "0.5804456", "0.57886666", "0.57884914", "0.5774842", "0.57680684", "0.5757683", "0.57...
0.5316852
77
Method opening all images to test their validity.
def verify_images(root_dir, root_listdir): counter = 0 for index, image_dir in enumerate(root_listdir): images_listdir = os.listdir(root_dir + "/" + image_dir) list_of_images_indices = [ image_index for image_index in range(3, len(images_listdir) - 1) if image_index % 2 == 0 ] for image_ind in list_of_images_indices: filename = root_dir + "/" + image_dir + "/" + images_listdir[image_ind] try: im = Image.open(filename) im.verify() im.close() except (OSError, ValueError): counter += 1 print("%d files caused error due to OSError and ValueError." % counter)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_images(self):\n self.roses.save_image()\n all_images = Images.get_all_images()\n self.assertTrue(len(all_images)<1)", "def load_from_images(self):\n logging.debug(\"load_from_images called\")\n return True", "def images_exist(self):\n pass", "def test_rea...
[ "0.6742113", "0.62553185", "0.6229397", "0.6169011", "0.61681324", "0.6136686", "0.60898274", "0.60487515", "0.5997627", "0.5935533", "0.58759665", "0.58210576", "0.5768145", "0.5766127", "0.5718156", "0.5691568", "0.5679888", "0.56696707", "0.5667723", "0.5602127", "0.557296...
0.663587
1
for a given template and list of extensions, find every file related to that template which has one of the extensions.
def find_template_companion_files(template: Path, extensions: Iterable[str], recurse_up_to: Path = None) -> Set[Path]: files_to_check = [] # Get a list of all file names to look for in each folder data_file_names = [] basename = template.name.split('.')[0] for i in range(len(template.suffixes)): ext = ''.join(template.suffixes[:i+1]) for data_file_ext in extensions: data_file_names.append(Path(basename + ext).with_suffix(data_file_ext)) # Look for those files in the template's current folder (a.k.a. parent directory) files_to_check.extend([template.parent / file_name for file_name in data_file_names]) if recurse_up_to and recurse_up_to in template.parents: # Look for those files in every parent directory up to `recurse_up_to`, # excluding the template's parent directory which has already been checked relative_path = template.parent.relative_to(recurse_up_to) for folder in relative_path.parents: for file in data_file_names: files_to_check.append(recurse_up_to / folder / file) return set([file for file in files_to_check if file.is_file()])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_files_by_extension(\n files: list ,\n extensions: list\n):\n filtered_files = []\n for file in files:\n file_ext = os.path.splitext(file)[-1].lower()\n file_ext = _remove_dot_from_extension(file_ext)\n for extension in extensions:\n ext = _remove_dot_from_exte...
[ "0.6474418", "0.63438606", "0.63118434", "0.6272479", "0.6263339", "0.6136152", "0.6116507", "0.6069046", "0.60550666", "0.6040801", "0.599823", "0.5954619", "0.5945622", "0.59430933", "0.5937887", "0.58905417", "0.58869183", "0.5864783", "0.58605796", "0.58385235", "0.581291...
0.74726915
0
The core component of this software is the Yasha class. When used as a commandline tool, a new instance will be create with each invocation. When used as a library, multiple different instances can be created with different configurations
def __init__(self, root_dir: Path = Path('.'), variable_files: List[Union[Path,str]] = list(), inline_variables = dict(), yasha_extensions_files: List[Union[Path,str]] = list(), template_lookup_paths: List[Union[Path,str]] = list(), mode: Union[Literal['pedantic'], Literal['debug'], None] = None, encoding: str = ENCODING, **jinja_configs):
    """Configure a reusable Yasha instance.

    Builds the base Jinja environment, installs the bundled filters, tests
    and extension classes, applies any extra Jinja settings passed as
    keyword arguments, loads the given extensions files, and seeds the
    environment globals from the variable files (with *inline_variables*
    taking precedence over file-sourced data).
    """
    self.root = root_dir
    self.parsers = dict(PARSERS)
    self.template_lookup_paths = list(map(Path, template_lookup_paths))
    self.yasha_extensions_files = list(map(Path, yasha_extensions_files))
    self.variable_files = list(map(Path, variable_files))
    self.encoding = encoding

    self.env = Environment()
    # 'pedantic' fails hard on undefined variables; 'debug' leaves them visible
    undefined_for_mode = {'pedantic': StrictUndefined, 'debug': DebugUndefined}
    if mode in undefined_for_mode:
        self.env.undefined = undefined_for_mode[mode]
    self.env.filters.update(FILTERS)
    self.env.tests.update(TESTS)
    for extension_class in CLASSES:
        self.env.add_extension(extension_class)
    # Any remaining keyword arguments are applied directly as Environment
    # attributes (iterating an empty dict is a no-op, so no guard needed).
    for option, value in jinja_configs.items():
        setattr(self.env, option, value)

    for extensions_file in self.yasha_extensions_files:
        self._load_extensions_file(extensions_file)
    self.env.loader = FileSystemLoader(self.template_lookup_paths)

    # Data-file variables form the baseline; inline variables override them.
    self._load_data_files(self.variable_files)
    self.env.globals.update(inline_variables)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(cls):\n raise NotImplementedError", "def __init__(self):\r\n self._config = Config.load()\r\n self._bootstrap_jar_url = self._config.get('ivy', 'bootstrap_jar_url',\r\n default=self._DEFAULT_URL)\r\n self._timeout = Amount(self._config.getint...
[ "0.6384319", "0.5974639", "0.59633154", "0.59085184", "0.58305883", "0.58266973", "0.5822428", "0.58058023", "0.5801817", "0.5789056", "0.57864296", "0.57859546", "0.5772526", "0.5756182", "0.5749882", "0.5728382", "0.57279474", "0.57279474", "0.57224405", "0.57149786", "0.56...
0.0
-1
Render a single template
def render_template(self, template: Union[Path, str], find_data_files = True, find_extension_files = True, jinja_env_overrides = dict(), output: BinaryIO = None) -> Union[str, BinaryIO]:
    """Render a single template.

    :param template: either a Path to a template file or the template text
        itself.  Automatic companion-file lookup (data files, extension
        files) only happens for Path templates.
    :param find_data_files: merge variables from companion data files into
        the environment globals before rendering.
    :param find_extension_files: load companion extension files, updating
        the environment and the parsers dict.
    :param jinja_env_overrides: mapping of Environment attribute name ->
        value applied just before rendering.  (Default dict is never
        mutated here, so the shared default is safe.)
    :param output: optional binary stream; when given, the rendered output
        is streamed into it (encoded with self.encoding) and the stream is
        returned instead of a string.
    """
    if isinstance(template, Path):
        # Automatic file lookup only works if template is a file.  If
        # template is a str (e.g. piped in via STDIN) there are no
        # companion files to find.
        if find_extension_files:
            # Load extension files related to this template, updating the
            # env and the parsers dict.
            extension_files = find_template_companion_files(template, EXTENSION_FILE_FORMATS, self.root)
            for ext in extension_files:
                self._load_extensions_file(ext)
        if find_data_files:
            # Merge variables from related data files into env globals.
            data_files = find_template_companion_files(template, self.parsers.keys(), self.root)
            self._load_data_files(data_files)
        # Make the template's own folder resolvable by {% include %} etc.
        self.env.loader.searchpath.append(template.parent)  # type: ignore
        template_text = template.read_text()
    else:
        template_text = template

    # BUG FIX: iterating a dict directly yields only its keys, so the
    # original `for k, v in jinja_env_overrides:` raised ValueError for any
    # non-empty mapping.  .items() yields the (attribute, value) pairs.
    for k, v in jinja_env_overrides.items():
        setattr(self.env, k, v)

    if output:
        # Don't return the rendered template; stream it to the file object.
        compiled_template: TemplateStream = self.env.from_string(template_text).stream()
        compiled_template.enable_buffering(5)
        compiled_template.dump(output, encoding=self.encoding)
        return output
    return self.env.from_string(template_text).render()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render(request, template):\r\n return render_to_response('static_templates/' + template, {})", "def render(self, template: str, **vars) -> str:", "def render(self, _template, **context):\n context['_request'] = self.request\n self.response.write(self.jinja2.render_template(_template, **con...
[ "0.73298955", "0.72438174", "0.71931773", "0.7177177", "0.7177177", "0.7177177", "0.7177177", "0.7177177", "0.7039566", "0.70391536", "0.6924555", "0.69022024", "0.68838", "0.68724924", "0.6871491", "0.6865003", "0.6862738", "0.68552256", "0.6819078", "0.67653996", "0.6763914...
0.0
-1
When rendering or working with multiple template files, we load extension files related to those templates, which alters the environment, and we add each template's parent directory to the template loader search path, which also alters the environment. That means processing one template with a Yasha instance alters the behaviour of the Yasha instance for all future templates processed. To avoid this, we create an isolated jinja environment for each template from the Yasha instance's base environment.
def _make_isolated_env_for_template(self, template: Union[Path, str]) -> Environment:
    """Build a per-template copy of the base Jinja environment.

    Rendering a file template mutates the environment: companion extension
    files are loaded, and the template's parent folder is appended to the
    loader search path.  Without isolation, one template's rendering would
    change the behaviour for every later template.  String templates have
    no companion files and cannot dirty the base environment, so they use
    it directly.
    """
    if isinstance(template, str):
        return self.env

    # Overlay shares config with the base env; replace the shared mutable
    # containers with copies so mutations stay local.
    isolated: Environment = self.env.overlay()
    # globals may hold nested structures from variable files -> deep copy
    isolated.globals = deepcopy(isolated.globals)
    # filters and tests map names to callables; shallow copies suffice
    isolated.filters = isolated.filters.copy()
    isolated.tests = isolated.tests.copy()
    # fresh loader so search-path appends don't leak back to the base env
    copied_searchpath = isolated.loader.searchpath.copy()  # type: ignore
    isolated.loader = FileSystemLoader(searchpath=copied_searchpath)
    return isolated
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_templates( path=\"boilerplate\" ):\n global template_env\n template_loader = jinja2.FileSystemLoader(searchpath=\"boilerplate\" )\n template_env = jinja2.Environment(\n loader=template_loader,\n lstrip_blocks=True\n )", "def _buildjinja2_templates(self):\n templates ...
[ "0.71863306", "0.68339425", "0.68019456", "0.64534956", "0.6438727", "0.64271533", "0.63226676", "0.62789506", "0.61880404", "0.61768335", "0.6093075", "0.6058683", "0.60580313", "0.60333896", "0.59724724", "0.59577113", "0.5878471", "0.5849247", "0.5840072", "0.5812187", "0....
0.5354332
61
Produces a list of all files that the rendering of this template depends on, including files referenced within {% include %}, {% import %}, and {% extends %} blocks within the template
def get_makefile_dependencies(self, template: Union[Path, str]) -> List[Path]:
    """List every file the rendering of *template* depends on.

    Includes the instance's variable files and extensions files, plus any
    template partials referenced via {% include %}, {% import %}, or
    {% extends %} blocks, resolved against the loader's search paths.
    """
    if isinstance(template, Path):
        template = template.read_text()
    # Start from a fresh list so the instance attributes are not mutated.
    dependencies = self.variable_files + self.yasha_extensions_files

    # find_referenced_templates returns a generator that may yield None for
    # dynamically-computed names; drop those.
    referenced_partials: List[str] = list(
        filter(bool, find_referenced_templates(self.env.parse(template)))
    )

    for relative_path in referenced_partials:
        for basepath in self.env.loader.searchpath:  # type: ignore
            if not isinstance(basepath, Path):
                basepath = Path(basepath)
            template_path = basepath / relative_path
            # BUG FIX: the original tested `template_path.is_file` without
            # calling it — a bound method is always truthy, so every
            # searchpath candidate was appended whether it existed or not.
            if template_path.is_file():
                dependencies.append(template_path)
                # First matching searchpath wins, mirroring how Jinja's
                # FileSystemLoader resolves template names.
                break
    return dependencies
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_imports() -> str:\n extension = \"\"\n for js_ in JS_FILES.values():\n extension += f'<script src=\"{js_}\"></script>'\n for css in CSS_FILES.values():\n extension += f'<link rel=\"stylesheet\" href=\"{css}\" is=\"custom-style\">'\n\n return extension", "...
[ "0.6667309", "0.649749", "0.64170736", "0.63219726", "0.6255717", "0.61782795", "0.6177389", "0.61124706", "0.60708463", "0.60316014", "0.6030461", "0.6025385", "0.60198873", "0.598662", "0.59268177", "0.5915987", "0.58921176", "0.58828634", "0.58777714", "0.5809368", "0.5792...
0.6414305
3
Distributes an archive to your web servers
def do_deploy(archive_path):
    """Distribute an archive to the web servers.

    Uploads *archive_path* to /tmp/ on the remote host, unpacks it into
    /data/web_static/releases/<archive>/, flattens the web_static folder,
    and repoints the /data/web_static/current symlink at the new release.

    :param archive_path: local path to the archive; assumed to be of the
        form ``versions/<name>.tgz`` (filename[1] below relies on exactly
        one '/'). TODO confirm with callers.
    :return: True on success, False if the archive is missing or any
        remote command fails.
    """
    # Guard clause instead of if/else nesting.
    if not os.path.exists(archive_path):
        return False
    try:
        put(archive_path, "/tmp/")
        filename = archive_path.split('/')
        no_ext = filename[-1].split('.')
        archive = no_ext[0]
        run("mkdir -p /data/web_static/releases/" + archive + "/")
        run("tar -zxf /tmp/" + filename[1] +
            " -C /data/web_static/releases/" + archive + "/")
        run("rm /tmp/" + filename[1])
        run("mv /data/web_static/releases/" + archive +
            "/web_static/* /data/web_static/releases/" + archive + "/")
        run("rm -rf /data/web_static/releases/" + archive + "/web_static")
        run("rm -rf /data/web_static/current")
        run("ln -s /data/web_static/releases/" + archive +
            "/ /data/web_static/current")
        print("New version deployed!")
        return True
    # BUG FIX: narrowed from a bare `except:` which also swallowed
    # SystemExit and KeyboardInterrupt.
    except Exception:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_stat...
[ "0.7481866", "0.7308439", "0.717865", "0.70625347", "0.70540977", "0.6997107", "0.69896996", "0.6985268", "0.6972793", "0.6966511", "0.6944373", "0.69133264", "0.68920314", "0.6885152", "0.68583626", "0.68551016", "0.6854341", "0.68525314", "0.68385327", "0.681433", "0.678784...
0.6309926
65
To search for and replace the configured text strings in the file
def search_and_replace_config(param_dict, config_file):
    """Apply all configured search/replace substitutions to *config_file*.

    Runs four substitutions in order — users, processes, then their Ubuntu
    variants — each delegated to modify_config_file.  Raises ValueError
    when either argument is None.
    """
    if param_dict is None:
        raise ValueError('could not find parameters to update the configuration: %s' % param_dict)
    if config_file is None:
        raise ValueError('could not find config file to update the configuration: %s' % config_file)

    # Resolve every search/replace pair up front so a missing key fails
    # before anything is printed or modified (matching the original order
    # of operations).
    substitutions = [
        (param_dict['search_users_config'], param_dict['replace_users_config']),
        (param_dict['search_processes_config'], param_dict['replace_processes_config']),
        (param_dict['search_users_config_ubuntu'], param_dict['replace_users_config_ubuntu']),
        (param_dict['search_processes_config_ubuntu'], param_dict['replace_processes_config_ubuntu']),
    ]

    print("File to perform search and replace on: %s" % config_file)
    for search_text, replace_text in substitutions:
        modify_config_file(param_dict, config_file, search_text, replace_text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_config_file(config_file, search_config, replace_config):\n with open(config_file, 'r+') as f:\n content = f.read()\n f.seek(0)\n f.write(content.replace(search_config, replace_config))\n f.truncate()\n f.close()", "def replace_includes(self, file_name):\n\n ...
[ "0.7213954", "0.6664111", "0.6658277", "0.644173", "0.63886225", "0.6382801", "0.6320942", "0.6153013", "0.60404235", "0.6038611", "0.6027211", "0.592698", "0.5926587", "0.5894178", "0.58916545", "0.5873578", "0.58602273", "0.5843004", "0.5838578", "0.57836527", "0.57670426",...
0.6405321
4
To read the config file, modify the threshold content and truncate the file
def modify_config_file(config_file, search_config, replace_config):
    """Replace every occurrence of *search_config* with *replace_config*
    in *config_file*, rewriting the file in place.

    The file is opened in 'r+' mode, fully read, rewritten from the start,
    and truncated so a shorter replacement leaves no stale tail bytes.
    """
    with open(config_file, 'r+') as f:
        content = f.read()
        f.seek(0)
        f.write(content.replace(search_config, replace_config))
        f.truncate()
    # FIX: removed the redundant f.close() — the `with` block already
    # closes the file on exit.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reten_log(path):\n try:\n file = open(path, 'r+')\n lines = file.readlines()\n if lines > 200:\n file.truncate()\n file.close()\n else:\n file.close()\n except:\n pass", "def __write_thresholds_off_config(self, path):\n self.__w...
[ "0.63270384", "0.60728574", "0.57777673", "0.5709095", "0.52786064", "0.51557845", "0.5135187", "0.5053703", "0.5041154", "0.5027755", "0.5015283", "0.5009702", "0.49824095", "0.49750274", "0.49732378", "0.49518126", "0.49454418", "0.49447972", "0.49405065", "0.49233353", "0....
0.55548596
4
Return any vowels found in a supplied word.
def search4vowels(word):
    """Return the set of vowels found in *word*.

    Also prints each found vowel, preserving the original behaviour.
    Returns an empty set when no vowels occur.
    """
    vowels = set('aeiou')
    found = vowels.intersection(set(word))
    # FIX: loop variable renamed — it previously shadowed `vowels` — and
    # the documented return value restored (it was commented out).
    for vowel in found:
        print(vowel)
    return found
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _rv_standard(self, word, vowels):\n rv = \"\"\n if len(word) >= 2:\n if word[1] not in vowels:\n for i in range(2, len(word)):\n if word[i] in vowels:\n rv = word[i + 1 :]\n break\n\n elif word[0...
[ "0.74471885", "0.7248238", "0.71690327", "0.68447524", "0.6721554", "0.670467", "0.6704543", "0.64083433", "0.6355867", "0.6346289", "0.6267494", "0.62225914", "0.6183491", "0.61348367", "0.60796964", "0.604393", "0.60383445", "0.603059", "0.59944683", "0.5991076", "0.5983007...
0.6320444
10
Constructor. Create the settings objects
def __init__(self):
    """Constructor. Create the settings objects."""
    # Qt settings store — NOTE(review): presumably application/user-level
    # persistent settings; confirm against the QSettings usage elsewhere.
    self.s = QSettings()
    # Current QGIS project — QgsProject.instance() returns the project
    # singleton per the QGIS API (assumption to confirm for the pinned
    # QGIS version).
    self.p = QgsProject.instance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__( settings={} ):", "def __init__(self, settings):\n\n # store settings\n self.settings = settings", "def __init__(self, settings):\n \n # storing otmbs settings\n self.settings = settings", "def __init__(self, settings):\n self._settings = settings", "d...
[ "0.81398326", "0.8127764", "0.81223667", "0.8024526", "0.75850004", "0.7424904", "0.7297364", "0.727927", "0.7227605", "0.7210535", "0.7177579", "0.7173619", "0.71518964", "0.7086663", "0.695417", "0.6928098", "0.69154674", "0.6878284", "0.6834534", "0.6787074", "0.67606455",...
0.6513789
38
Transform x elementwise through an affine function y = exp(s)x + t where s = st[...,0] and t = st[...,1] with s.shape == x.shape == t.shape The Jacobian for this transformation is the coordinatewise product of the scaling factors J = prod(es[...,i],i)
def element_wise_affine(x, st, compute_jacobian=True):
    """Elementwise affine transform y = exp(s) * x + t.

    s = st[..., 0] and t = st[..., 1], each with the same shape as x.
    The Jacobian of this map is the coordinatewise product of the scaling
    factors, so its log is the sum of log-scales over the last axis.

    Returns (y, log_jacobian); log_jacobian is None when
    compute_jacobian is False.
    """
    scale = torch.exp(st[..., 0])
    shift = st[..., 1]
    log_jacobian = torch.sum(torch.log(scale), dim=-1) if compute_jacobian else None
    return scale * x + shift, log_jacobian
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse_element_wise_affine(x, st, compute_jacobian=True):\n es = torch.exp(-st[..., 0])\n t = st[..., 1]\n logj = None\n if compute_jacobian:\n logj = torch.sum(torch.log(es), dim=-1)\n\n return es * (x - t), logj", "def affine(params, x):\n return np.dot(params['w'], x) + params['b']...
[ "0.7174713", "0.6328652", "0.6287944", "0.6152823", "0.6144677", "0.5968129", "0.5957975", "0.5921042", "0.5909272", "0.5798729", "0.5789589", "0.5777053", "0.56403214", "0.5585435", "0.55693203", "0.5553606", "0.5544687", "0.5482424", "0.5461267", "0.54540503", "0.54315287",...
0.8046413
0
Transform x elementwise through an affine function y = exp(-s)(x - t) where s = st[...,0] and t = st[...,1] with s.shape == x.shape == t.shape This is the inverse of `element_wise_affine` above for the same set of parameters st The Jacobian for this transformation is the coordinatewise product of the inverse scaling factors J = prod(exp(-s[...,i]), i)
def inverse_element_wise_affine(x, st, compute_jacobian=True):
    """Inverse elementwise affine transform y = exp(-s) * (x - t).

    Exactly inverts `element_wise_affine` for the same parameter tensor st,
    where s = st[..., 0] and t = st[..., 1] match x's shape.  The Jacobian
    is the coordinatewise product of the inverse scale factors exp(-s), so
    its log is -sum(s) over the last axis.

    Returns (y, log_jacobian); log_jacobian is None when
    compute_jacobian is False.
    """
    inverse_scale = torch.exp(-st[..., 0])
    shift = st[..., 1]
    log_jacobian = torch.sum(torch.log(inverse_scale), dim=-1) if compute_jacobian else None
    return inverse_scale * (x - shift), log_jacobian
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def element_wise_affine(x, st, compute_jacobian=True):\n es = torch.exp(st[..., 0])\n t = st[..., 1]\n logj = None\n if compute_jacobian:\n logj = torch.sum(torch.log(es), dim=-1)\n\n return es * x + t, logj", "def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,...
[ "0.83168423", "0.6524651", "0.6448697", "0.6208682", "0.6185584", "0.60859853", "0.60077345", "0.6005692", "0.6002416", "0.585732", "0.5842322", "0.57764435", "0.5741281", "0.5718799", "0.57156426", "0.57109356", "0.5676722", "0.56560165", "0.5641633", "0.56377053", "0.563442...
0.77893174
1
Softmax loss function, naive implementation (with loops) Inputs have dimension D, there are C classes, and we operate on minibatches of N examples.
def softmax_loss_naive(W, X, y, reg):
    """Softmax loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on
    minibatches of N examples.

    :param W: (D, C) weight matrix.
    :param X: (N, D) minibatch of data.
    :param y: (N,) labels, y[i] in [0, C).
    :param reg: regularization strength (loss adds reg * sum(W**2)).
    :return: (loss, dW) where dW has the same shape as W.

    FIXES vs. original:
    - `xrange` (Python 2 only) replaced with `range`.
    - Gradient now includes the regularization term 2*reg*W, matching the
      reg*sum(W**2) term added to the loss.
    - `1 / num_train` replaced with explicit float division of the dot
      product (guards against integer division under `from __future__`-less
      Python 2 semantics; identical on Python 3).
    """
    loss = 0.0
    dW = np.zeros_like(W)

    num_train = X.shape[0]
    num_classes = W.shape[1]

    # Raw class scores, tracking the rowwise max for numeric stability.
    # (f_max starts at 0; subtracting any per-row constant cancels in the
    # softmax, so a negative true max is harmless.)
    f = np.zeros((num_train, num_classes))
    f_max = np.zeros((num_train, 1))
    for i in range(num_train):
        for j in range(num_classes):
            f[i, j] = np.dot(X[i, :], W[:, j])
            if f[i, j] > f_max[i]:
                f_max[i] = f[i, j]

    # Shifted exponentials and their rowwise sums.
    exp_f = np.zeros_like(f)
    sum_exp_f = np.zeros((num_train, 1))
    for i in range(num_train):
        for j in range(num_classes):
            f[i, j] -= f_max[i]
            exp_f[i, j] = math.exp(f[i, j])
            sum_exp_f[i] += exp_f[i, j]

    # Average cross-entropy loss over the minibatch.
    for i in range(num_train):
        loss += -math.log(exp_f[i, y[i]] / sum_exp_f[i])
    loss /= num_train

    # L2 regularization term (no 1/2 factor, matching the original).
    reg_term = 0.0
    for i in range(W.shape[0]):
        for j in range(W.shape[1]):
            reg_term += W[i, j] ** 2
    loss += reg * reg_term

    # Gradient: dL/df = softmax(f) - one_hot(y), then chain through X.
    P = np.zeros((num_train, num_classes))
    for i in range(num_train):
        for j in range(num_classes):
            P[i, j] = exp_f[i, j] / sum_exp_f[i]
        P[i, y[i]] -= 1
    for i in range(dW.shape[0]):
        for j in range(dW.shape[1]):
            dW[i, j] = np.dot(X[:, i], P[:, j]) / num_train
            # d(reg * sum(W**2))/dW = 2 * reg * W — was missing entirely.
            dW[i, j] += 2 * reg * W[i, j]

    return loss, dW
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def softmax_classifier(W, input, label, lamda):\n\n ############################################################################\n # TODO: Put your code here\n\n loss = 0.0\n num_train = input.shape[0]\n num_classes = W.shape[1]\n\n score = np.dot(input, W) # (N,C)\n prediction = np.argmax(sco...
[ "0.77309406", "0.76029754", "0.75815004", "0.7345373", "0.7316555", "0.7265773", "0.72579056", "0.72342706", "0.7226934", "0.72213393", "0.7203286", "0.71891123", "0.713433", "0.71218145", "0.7092656", "0.7017707", "0.69664174", "0.6941942", "0.693169", "0.69267535", "0.69202...
0.6760264
36