query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Access the run at a given index. This is required by QtQuick
def data(self, index, role=Qt.DisplayRole): if not index.isValid(): return QVariant() run = self._runs[index.row()] if role == Qt.DisplayRole: return run return QVariant()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, i):\n return self._runs[i]", "def run(self, run_idx):\n return self._h5['{}/{}'.format(RUNS, int(run_idx))]", "def run(self, run_number):\n return self[self.run_cache[run_number]]", "def __getitem__(self, index):\n # NOTE: this automatically supports slicing :-)\n ...
[ "0.72931045", "0.6952865", "0.6678343", "0.6282501", "0.61454105", "0.60703266", "0.60688543", "0.60501313", "0.60471225", "0.59921044", "0.5978085", "0.59680146", "0.59228444", "0.58857644", "0.5878358", "0.5874778", "0.5870897", "0.5781742", "0.5779231", "0.5774915", "0.575...
0.6371707
3
Update the data at a given index. This is required by QtQuick
def setData(self, index, value, role=Qt.EditRole): if not index.isValid(): return False if role == Qt.Edit: self._runs[index.row()] = value return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateRow(self, index: int) -> None:\n ...", "def set(self, index, data):\n self.data[index] = data", "def set_at_index(self, index: int, value: object) -> None:\n self.data[index] = value", "def _update_value_at(self, index, value):\n node = self._get_node_at(index)\n if n...
[ "0.7554738", "0.7046055", "0.70130014", "0.6715155", "0.6603428", "0.6596355", "0.6492101", "0.6436119", "0.63908285", "0.6352602", "0.63500535", "0.6314473", "0.6250634", "0.62338316", "0.62338316", "0.62338316", "0.62338316", "0.622466", "0.62043625", "0.6203889", "0.619686...
0.0
-1
A description of the model properties required by QtQuick
def flags(self, index): if not index.isValid(): return Qt.ItemIsEditable return Qt.ItemIsEnabled | Qt.ItemIsEditable
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def properties(self):\n pass", "def properties(self):", "def properties(self):", "def properties(self):", "def properties(self):\n raise NotImplementedError", "def properties(self):\n return None", "def properties(self):\n return None", "def get_properties(self):\n retu...
[ "0.69124717", "0.6807334", "0.6807334", "0.6807334", "0.67613196", "0.66567373", "0.66567373", "0.66386944", "0.6318433", "0.6283687", "0.6269547", "0.625697", "0.62384415", "0.6229266", "0.620358", "0.61727464", "0.61452335", "0.6096329", "0.6087369", "0.6087369", "0.6087369...
0.0
-1
Create a new run starting at the given coordinates
def append(self, startx, starty): if self._runs: angles = self._runs[-1]._angles pos = self._runs[-1]._position else: angles = [0] pos = None run = SingleRun(self, startx, starty, angles=angles, position=pos) self.beginInsertRows(QModelIndex(), len(self._runs), len(self._runs)) self._runs.append(run) self.endInsertRows() self.scriptChanged.emit() self.validChanged.emit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build(self, coordinates = None):\n\n # start- and endpoints of lines are nodes, but they do not need to have a point object associated to them\n # in this case, point coordinates should be set\n if (self.geo):\n coordinates = rs.PointCoordinates(self.geo)\n\n self.x = rou...
[ "0.5933312", "0.58285725", "0.5704058", "0.5686776", "0.5686776", "0.56424886", "0.56335765", "0.5611578", "0.5542422", "0.5480548", "0.54717386", "0.54646283", "0.5367157", "0.533819", "0.5320714", "0.52867836", "0.52701634", "0.52323407", "0.5213995", "0.5198065", "0.518935...
0.5322073
14
Change the ending coordinates of the most recent run
def update(self, x, y): delta_x = x-self._runs[-1]._x # pylint: disable=W0212 delta_y = y-self._runs[-1]._y # pylint: disable=W0212 if abs(delta_x) > abs(delta_y): self._runs[-1]._vertical = False # pylint: disable=W0212 self._runs[-1]._length = delta_x # pylint: disable=W0212 else: self._runs[-1]._vertical = True # pylint: disable=W0212 self._runs[-1]._length = delta_y # pylint: disable=W0212 i = len(self._runs) - 1 self.dataChanged.emit(self.index(i, 0), self.index(i, 0)) self.scriptChanged.emit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_end(self, coordinates):\n self._end = coordinates", "def end(self):\n self.set_initial_offset(1e6)", "def set_finishing_pos(self, finish):\n if finish and self.is_unoccupied(*finish):\n self.finish_pos = finish[:]\n else:\n self.set_random_pos('finishi...
[ "0.6409029", "0.6067654", "0.6002653", "0.59474105", "0.5929103", "0.5818964", "0.5757306", "0.5729312", "0.5727549", "0.5715503", "0.56266606", "0.56220305", "0.5599335", "0.55922794", "0.5553596", "0.55360717", "0.55360717", "0.5516263", "0.54933006", "0.5479724", "0.546685...
0.0
-1
Access a single run.
def get(self, i): return self._runs[i]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, run_number):\n return self[self.run_cache[run_number]]", "def get_run(self, run_id: str) -> sqlite3.Row:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELEC...
[ "0.7563216", "0.75471956", "0.74779475", "0.7438003", "0.6921127", "0.690785", "0.6889391", "0.6586346", "0.6453123", "0.64372474", "0.6409679", "0.632492", "0.6297225", "0.62775546", "0.6272261", "0.6258751", "0.62365556", "0.6131638", "0.6095218", "0.60870063", "0.6036361",...
0.6402159
11
The instrument script the performs the requested runs
def script(self): temp = "\n\n".join([r.script_line( self._angle_command, self._horizontal_command, self._vertical_command, self._origin, self._frame_width, self._frame_height) for r in self._runs]) return temp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_experiment():\n pass", "def run(self):\n self.run_measurement()\n self.run_analysis()\n if self.get_param_value('update'):\n self.run_update()", "def run_script(self):\n pass", "def run(self,measurements,actions):\n raise NotImplementedError", "def r...
[ "0.68662244", "0.68151057", "0.6767565", "0.65983254", "0.6565703", "0.65549105", "0.6449239", "0.6410411", "0.6353265", "0.6352231", "0.63458776", "0.6331616", "0.6285805", "0.6264376", "0.62491834", "0.6246712", "0.62430114", "0.62197703", "0.621477", "0.620736", "0.6206820...
0.0
-1
Can the current model be exported into a usable script
def valid(self): if not self._runs: return False return all([r.valid for r in self._runs])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dump_model(self):", "def export_model(self, save_path: str, save_format: Optional[str] = None) -> None:", "def save_model(self, filename):\r\n pass", "def export_model(model, name):\n\tpath = \"data/{}/\".format(name)\n\tfilename = \"{}.model\".format(name)\n\tif os.path.isdir(path):\n\t\tprint(\"mode...
[ "0.6738641", "0.66990805", "0.6495466", "0.6315162", "0.62201536", "0.61410815", "0.6110016", "0.6058837", "0.60287017", "0.5983388", "0.5955291", "0.59451455", "0.5926416", "0.5924503", "0.5913353", "0.58991826", "0.5894664", "0.5845883", "0.5842756", "0.58416146", "0.583656...
0.0
-1
A helper function to perform a 3 year moving window filter for a single land cover value (such as Forest as 1) for one three year window representing year(i1), year(i), year(i+1) annual land cover classifications. This function applies on one window, and should only be called using the function applyWindow3years. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of three consecutive years, and if the extremities of the consecutive years are identical but the centre position is not, then the central pixels are reclassified to match its temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def mask3(imagem, value, bandNames): mask = imagem.select(bandNames[0]).eq(value) \ .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \ .bitwiseAnd(imagem.select(bandNames[2]).eq(value)) change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) img_out = imagem.select(bandNames[1]).blend(change_img) return img_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def apply...
[ "0.77075994", "0.6490901", "0.642566", "0.5768101", "0.5706588", "0.548362", "0.5444088", "0.52517235", "0.50446564", "0.50012696", "0.49663457", "0.4957738", "0.49230793", "0.47313127", "0.4699657", "0.46902734", "0.4674825", "0.46389544", "0.45470658", "0.45407534", "0.4527...
0.46771812
16
A helper function to perform a 4 year moving window filter for a single land cover value (such as Forest as 1) for one four year window representing year(i1), year(i), year(i+1), and year(i+2) annual land cover classifications. This function applies on one window, and should only be called using the function applyWindow4years. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of four consecutive years, and if the extremities of the consecutive years are identical but the centre position is not, then the central pixels are reclassified to match its temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def mask4(imagem, value, bandNames): mask = imagem.select(bandNames[0]).eq(value) \ .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \ .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \ .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1) return img_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(im...
[ "0.77455264", "0.69279945", "0.6802912", "0.58571285", "0.5832874", "0.5571592", "0.5528454", "0.51787966", "0.50988805", "0.49669826", "0.49113834", "0.4877425", "0.48612544", "0.48353228", "0.4781184", "0.47058412", "0.46759126", "0.4645408", "0.45981827", "0.45730013", "0....
0.49336118
10
A helper function to perform 5 year moving window filter for a single land cover value (such as Forest as 1) for one five year window representing year(i1), year(i), year(i+1), year(i+2), and year(i+3) annual land cover classifications. This function applies on one window, and should only be called using the function applyWindow5years. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of five consecutive years, and if the extremities of the consecutive years are identical but the centre position is not, then the central pixels are reclassified to match its temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def mask5(imagem, value, bandNames): mask = imagem.select(bandNames[0]).eq(value) \ .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \ .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \ .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \ .bitwiseAnd(imagem.select(bandNames[4]).eq(value)) change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2) return img_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(im...
[ "0.80519295", "0.7068984", "0.69225854", "0.57098013", "0.5579009", "0.55375785", "0.5408724", "0.5229599", "0.50659686", "0.5013549", "0.494298", "0.49226463", "0.48234645", "0.4740639", "0.46834993", "0.46194592", "0.46066454", "0.45539072", "0.45349857", "0.45121947", "0.4...
0.5577494
5
Function to perform a 5 year moving window filter for a single land cover value (such as Forest as 1) for all years in an image. Calls the function mask5. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of five consecutive years, and if the extremities of the consecutive years are identical but the centre position is not, then the central pixels are reclassified to match its temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def applyWindow5years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-3): img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)])) img_out = img_out.addBands(imagem.select(bandNames[-3])) img_out = img_out.addBands(imagem.select(bandNames[-2])) img_out = img_out.addBands(imagem.select(bandNames[-1])) return img_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.selec...
[ "0.66861373", "0.65514684", "0.6478541", "0.5613955", "0.54316807", "0.5428405", "0.5313811", "0.520505", "0.51899797", "0.50904", "0.50412196", "0.5036899", "0.49864736", "0.47982645", "0.4767842", "0.47645608", "0.4758746", "0.47383195", "0.46717232", "0.46011153", "0.45707...
0.80881137
0
Function to perform a 4 year moving window filter for a single land cover value (such as Forest as 1) for all years in an image. Calls the function mask4. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of four consecutive years, and if the extremities of the consecutive years are identical but the centre position is not, then the central pixels are reclassified to match its temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def applyWindow4years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-2): img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)])) img_out = img_out.addBands(imagem.select(bandNames[-2])) img_out = img_out.addBands(imagem.select(bandNames[-1])) return img_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(im...
[ "0.6658748", "0.64081764", "0.6048875", "0.5828636", "0.53740543", "0.5318547", "0.5314894", "0.5258221", "0.5224046", "0.49241713", "0.48995483", "0.48989847", "0.48840585", "0.47406876", "0.47206843", "0.4713083", "0.46992207", "0.46542522", "0.4640126", "0.4630526", "0.462...
0.76945615
0
Function to perform a 3 year moving window filter for a single land cover value (such as Forest as 1) for all years in an image. Calls the function mask3. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of three consecutive years, and if the extremities of the consecutive years are identical but the centre position is not, then the central pixels are reclassified to match its temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def applyWindow3years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-1): img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)])) img_out = img_out.addBands(imagem.select(bandNames[-1])) return img_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(im...
[ "0.6262926", "0.61455876", "0.6122352", "0.59712267", "0.5810346", "0.56390077", "0.5292091", "0.5260056", "0.52369714", "0.5159779", "0.5131087", "0.50615054", "0.49572933", "0.48659185", "0.4854936", "0.4852849", "0.48406282", "0.48388356", "0.47845525", "0.47620434", "0.47...
0.7711574
0
Function to perform a 3 year window filter for a single land cover value (such as Forest as 1) for the first year in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. For the first year of land cover classifications, a three consecutive years window is used and if the classifications of the first and last years are different from its neighbours, this values are replaced by the classification of its matching neighbours. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def applyMask3first(imagem, value, bandNames): mask = imagem.select(bandNames[0]).neq(value) \ .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \ .bitwiseAnd(imagem.select(bandNames[2]).eq(value)) change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value) img_out = imagem.select(bandNames[0]).blend(change_img) img_out = img_out.addBands(imagem.select(bandNames[1:])) return img_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def apply...
[ "0.713149", "0.6458987", "0.6343248", "0.63266295", "0.613911", "0.57802016", "0.5521701", "0.5485822", "0.5108832", "0.49993712", "0.49877465", "0.4840823", "0.47786245", "0.47172713", "0.46206245", "0.46117097", "0.4591191", "0.45893195", "0.45841068", "0.45762342", "0.4554...
0.52166533
8
Function to perform a 3 year window filter for a single land cover value (such as Forest as 1) for the final year in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. For the first year of land cover classifications, a three consecutive years window is used and if the classifications of the first and last years are different from its neighbours, this values are replaced by the classification of its matching neighbours. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def applyMask3last(imagem, value, bandNames): mask = imagem.select(bandNames[-3]).eq(value) \ .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \ .bitwiseAnd(imagem.select(bandNames[-1]).neq(value)) change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value) img_out = imagem.select(bandNames[0:-1]) img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img)) return img_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def apply...
[ "0.73957485", "0.67384607", "0.65364456", "0.6378453", "0.61525947", "0.61262476", "0.5607345", "0.55978113", "0.5576969", "0.4884514", "0.48811036", "0.48680866", "0.4859982", "0.47968277", "0.47706997", "0.476356", "0.47634727", "0.47294396", "0.47226104", "0.4682663", "0.4...
0.53521746
9
Function to perform a forward moving gap fill for all years in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The forward gap fill is applied iteratively from the first year of bandNames through the final year, where if the current image has missing data, it is filled with the following year's values.
def applyForwardNoDataFilter(image, bandNames): #Get a list of band names from year(1) through the last year bandNamesEE = ee.List(bandNames[1:]) #Define forwards filter #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year #currentImage = image.select(bandNames[1]), the image for the second year #previousImage = image.select(bandNames[0]), the first year #Find where the second year has missing data, replace those values with the values of the first year #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill #and the second band is the first years classification #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year def forwardNoDataFilter(bandName, previousImage): currentImage = image.select(ee.String(bandName)) previousImage = ee.Image(previousImage) currentImage = currentImage.unmask(previousImage.select([0])) return currentImage.addBands(previousImage) #Iterate through all the years, starting with the first year's classification filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0]))) filtered = ee.Image(filtered) return filtered.select(bandNames)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]),...
[ "0.65311575", "0.5876845", "0.5747447", "0.5284138", "0.52226", "0.5096014", "0.5058866", "0.5047209", "0.49857956", "0.49474898", "0.48613372", "0.48247787", "0.47784477", "0.46002764", "0.45790556", "0.45642906", "0.4548984", "0.4521293", "0.45085937", "0.44796604", "0.4468...
0.70489925
0
Function to perform a backward moving gap fill for all years in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The backward gap fill is applied iteratively from the last year of bandNames through the first year, where if the current image has missing data, it is filled with the previous year's values.
def applyBackwardNoDataFilter(image, bandNames): #Get a list of band names to iterate over, from year(-2) through year(0) bandNamesEE = ee.List(bandNames[:-1]).reverse() #Define backwards filter #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year #currentImage = image.select(bandNames[-2]), the second to last year #followingImage = image.select(bandNames[-1]), the final year #Find where the second to last year has missing data, replace those values with the values of the following year #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill #and the second band is the final years classification #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year def backwardNoDataFilter(bandName, followingImage): currentImage = image.select(ee.String(bandName)) followingImage = ee.Image(followingImage) currentImage = currentImage.unmask(followingImage.select([0])) return currentImage.addBands(followingImage) #Apply backwards filter, starting with the final year and iterating through to year(0) filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1]))) #Re-order bands to be in chronological order filtered = ee.Image(filtered) return filtered.select(bandNames)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for ...
[ "0.7032449", "0.5949247", "0.5812707", "0.5253641", "0.51127684", "0.49725264", "0.47500893", "0.46962532", "0.46487218", "0.45113215", "0.44795865", "0.44701588", "0.4448179", "0.4420199", "0.4388033", "0.43850613", "0.43616962", "0.4360365", "0.43425742", "0.43269235", "0.4...
0.7573101
0
Function to apply forward gap filling and backward gap filling to an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. This funciton calls applyForwardNoDataFilter then applyBackwardNoDataFilter
def applyGapFilter(image, bandNames): filtered = applyForwardNoDataFilter(image, bandNames) filtered = applyBackwardNoDataFilter(filtered, bandNames) return filtered
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for ...
[ "0.7110317", "0.704121", "0.5153824", "0.51472473", "0.50976783", "0.5092685", "0.50910985", "0.5038838", "0.4983372", "0.4950319", "0.48741138", "0.48643586", "0.4801015", "0.47917843", "0.47886074", "0.47822264", "0.4780743", "0.47651857", "0.47144574", "0.4703689", "0.4698...
0.745405
0
Function to calculate the total number of times a pixel changed classes across the time series
def calculateNumberOfChanges(image, bandNames): #Get a collection of images where each image has 2 bands: classifications for year(i) and classifications for year(i+1) lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1]) #Get a collection of images where each image represents whether there was change from year(i) to year(i+1) and convert to an image lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange) lc_one_change_image = lc_one_change_col.toBands() #Calculate the number of changes by applying the sum reducer lc_sum_changes = lc_one_change_image.reduce(ee.Reducer.sum().unweighted()) return lc_sum_changes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_class_count(df):\r\n \r\n return df[\"class\"].value_counts()", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def num_classes(self) -> int:\n ...
[ "0.63271403", "0.6253417", "0.6221684", "0.6215353", "0.61849016", "0.6137284", "0.6085611", "0.60084146", "0.59348655", "0.5917543", "0.588262", "0.588262", "0.588262", "0.58785236", "0.58546215", "0.58513844", "0.58418506", "0.58400005", "0.5837566", "0.5830539", "0.5821067...
0.6240185
2
Function to apply an incidence filter. The incidence filter finds all pixels that changed more than numChangesCutoff times and is connected to less than connectedPixelCutoff pixels, then replaces those pixels with the MODE value of that given pixel position in the stack of years.
def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6): #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff num_changes = calculateNumberOfChanges(image, bandNames) too_many_changes = num_changes.gt(numChangesCutoff) #Get binary images of the land cover classifications for the current year binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary) #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff))) #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames) #Get an image that represents the mode of the land cover classes in each pixel mode_image = image.reduce(ee.Reducer.mode()) #Replace pixels of image where incidence_filter is True with mode_image incidence_filtered = image.where(incidence_filter, mode_image) return incidence_filtered
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassif...
[ "0.52584434", "0.501605", "0.50095785", "0.5009003", "0.5002469", "0.49740124", "0.49180493", "0.49099478", "0.48357704", "0.47827873", "0.47746482", "0.47566548", "0.4741615", "0.4737234", "0.46963915", "0.46842596", "0.4639385", "0.46338367", "0.46156648", "0.46046755", "0....
0.7578108
0
Function to apply an frequency filter. This filter takes into consideration the occurrence frequency throughout the entire time series. Thus, all class occurrence with less than given percentage of temporal persistence (eg. 3 years or fewer out of 33) are replaced with the MODE value of that given pixel position in the stack of years.
def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams):
    """Apply a temporal-frequency filter to a multi-band classification image.

    For every year band, pixels whose land cover class occurred fewer than
    ``filterParams[class]`` times across the whole time series are replaced
    with the per-pixel mode of all years.

    Args:
        image: ee.Image with one classification band per year.
        yearBandNames: list of per-year band names to filter.
        classDictionary: mapping of class name -> class value (project type).
        filterParams: mapping of class name -> minimum occurrence count.

    Returns:
        ee.Image with one filtered band per entry in ``yearBandNames``.
    """
    # Land cover classes as a list of strings.
    lc_classes = classDictionary.keys().getInfo()

    # One binary image per class/year: 1 where that class was mapped.
    binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)

    # Occurrence count of each class across all years.
    class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)

    # Per-pixel mode of the land cover classes across all years.
    mode_image = image.reduce(ee.Reducer.mode())

    filtered_years = []
    for yearBand in yearBandNames:
        yearImage = image.select(yearBand)
        for lc_class in lc_classes:
            # Minimum number of occurrences allowed for this class.
            min_occurance = filterParams.get(lc_class)
            # 1 where the class occurred fewer times than the minimum.
            change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))
            # Replace only pixels that are BOTH of this class AND below the
            # occurrence threshold with the per-pixel mode.
            yearImage = yearImage.where(
                yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),
                mode_image)
        filtered_years.append(yearImage.rename(yearBand))

    # BUGFIX: the previous implementation seeded the output with ee.Image(),
    # whose default 'constant' band leaked into the returned image.
    # Concatenating only the per-year bands avoids the spurious band.
    return ee.Image.cat(filtered_years)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frequency_filter(fc, L, srf, KIND=2):\n\n if hasattr(KIND, \"__len__\"):\n PASS = KIND\n KIND = 2\n else:\n PASS = [2,3]\n KIND = [KIND]\n\n # fourier transform of lateral inhibitory function \n\n # tonotopic axis\n if issubclass(type(fc), str):\n fc = float(fc...
[ "0.60981953", "0.56942517", "0.5577544", "0.5543279", "0.55281085", "0.5500619", "0.5476223", "0.5473143", "0.54327935", "0.5337489", "0.5303522", "0.5286453", "0.52189946", "0.521232", "0.5156549", "0.51182044", "0.51102227", "0.50913894", "0.5088179", "0.5085512", "0.501948...
0.69544667
0
Function to apply a probability filter to land cover probabilities in each image of imageCollection. The user defines which classes will be filtered and how to filter them in the params list. The params list is a list of dictionaries, one for each class the user wants to filter.
def applyProbabilityCutoffs(imageCollection, params):
    """Apply per-class probability filters to every image in a collection.

    Each entry of ``params`` is a dict with keys 'class_name', 'class_value',
    'filter' ('gt'/'gte'/'lte'/other, where anything else behaves as 'lt')
    and 'threshold'. 'gt'/'gte' force the class where its probability passes
    the threshold; 'lte'/'lt' demote low-confidence pixels of that class to
    the majority class of their 3x3 neighborhood.

    Args:
        imageCollection: ee.ImageCollection of per-class probability images.
        params: list of filter-parameter dicts as described above.

    Returns:
        ee.ImageCollection of filtered classification images.
    """
    def _reclass_neighborhood_mode(classifications, prob_mask, class_value):
        # Replace pixels where the probability test passed AND the current
        # classification equals class_value with the 3x3 neighborhood mode.
        class_mask = classifications.eq(class_value)
        reclass_mask = prob_mask.bitwiseAnd(class_mask)
        kernel = ee.Kernel.square(1)
        neighs = classifications.neighborhoodToBands(kernel)
        majority = neighs.reduce(ee.Reducer.mode())
        return classifications.where(reclass_mask, majority)

    def probabilityFilter(image):
        # Start from the argmax classification of the probability bands.
        classifications = npv.probabilityToClassification(image)
        for param in params:
            class_name = param.get('class_name')
            class_value = param.get('class_value')
            filter_name = param.get('filter')
            threshold = param.get('threshold')
            if filter_name == 'gt':
                # Force class_value where its probability exceeds threshold.
                prob_mask = image.select(class_name).gt(ee.Image.constant(threshold))
                classifications = classifications.where(prob_mask, class_value)
            elif filter_name == 'gte':
                prob_mask = image.select(class_name).gte(ee.Image.constant(threshold))
                classifications = classifications.where(prob_mask, class_value)
            elif filter_name == 'lte':
                prob_mask = image.select(class_name).lte(ee.Image.constant(threshold))
                classifications = _reclass_neighborhood_mode(classifications, prob_mask, class_value)
            else:
                # NOTE: any other filter string falls through to 'lt' —
                # behavior preserved from the original implementation.
                prob_mask = image.select(class_name).lt(ee.Image.constant(threshold))
                classifications = _reclass_neighborhood_mode(classifications, prob_mask, class_value)
        return ee.Image(classifications)

    return ee.ImageCollection(imageCollection.map(probabilityFilter))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassif...
[ "0.6045485", "0.5797399", "0.5739651", "0.57369685", "0.5731208", "0.56458217", "0.55604017", "0.55141634", "0.5474806", "0.5470038", "0.5451431", "0.5425758", "0.53987706", "0.53475237", "0.5286839", "0.52788055", "0.52607375", "0.5203152", "0.5157591", "0.5144034", "0.51254...
0.79373574
0
Return a logger with a default ColoredFormatter.
def setup_logger():
    """Build and return a DEBUG-level logger wired to a ColoredFormatter."""
    fmt = (
        '%(log_color)s%(levelname)-5s%(reset)s '
        '%(yellow)s[%(asctime)s]%(reset)s'
        '%(green)s %(name)s %(purple)s %(filename)s %(purple)s %(funcName)s %(purple)s:%(lineno)d%(reset)s '
        '%(bold_blue)s%(message)s%(reset)s'
    )
    level_colors = {
        'DEBUG': 'blue',
        'INFO': 'yellow',
        'WARNING': 'red',
        'ERROR': 'blue,bg_bold_red',
        'CRITICAL': 'red,bg_white',
    }
    colored_formatter = ColoredFormatter(
        fmt,
        datefmt='%y-%m-%d %H;%M:%S',
        log_colors=level_colors,
    )

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(colored_formatter)

    logger = logging.getLogger('shen-yue-is-beautiful')
    logger.addHandler(stream_handler)
    logger.setLevel(logging.DEBUG)
    return logger
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_logger():\n log = logging.getLogger() # root logger\n log.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n cformat = '%(log_color)s' + format_str\n colors = {'DEBUG': 'reset',\n ...
[ "0.6996591", "0.69907147", "0.6911573", "0.682854", "0.65775317", "0.65120155", "0.6396354", "0.6385912", "0.6341703", "0.6268824", "0.61986893", "0.6196991", "0.618999", "0.61819875", "0.6148457", "0.6129842", "0.6128831", "0.6113148", "0.61027133", "0.60929334", "0.60799354...
0.67095196
4
Create and use a logger.
def main():
    """Create the demo logger and emit one message at every severity level."""
    logger = setup_logger()
    for emit, message in (
        (logger.debug, 'a debug message'),
        (logger.info, 'an info message'),
        (logger.warning, 'a warning message'),
        (logger.error, 'an error message'),
        (logger.critical, 'a critical message'),
    ):
        emit(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!", "def create_logger():\r\n global logger\r\n logger = logging.getLogger(logger_name)\r\n\r\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')\r\n \r\n handler = logging.StreamHandler()...
[ "0.81048304", "0.80497605", "0.78264886", "0.77451503", "0.7742907", "0.762832", "0.7588399", "0.7549163", "0.75487506", "0.7536273", "0.74903685", "0.74829865", "0.74677", "0.7460846", "0.7454843", "0.74143606", "0.74058366", "0.73974717", "0.73834455", "0.73752475", "0.7360...
0.0
-1
Converts text data to feature data with the instance's vectorizer. Returns None.
def process_data(self):
    """Vectorize the raw text of every split and cache the feature data.

    Fills ``self.processed_data`` with ``{'x': features, 'y': targets}`` per
    split, then re-activates the current split via ``set_split``.
    """
    self.processed_data = dict()
    for split_name, split_frame in self.text_data.items():
        targets = split_frame[self.target_col].values
        print("Vectorizing for split: " + split_name)
        # One feature vector per text entry.
        features = np.array([self.vectorizer(text) for text in split_frame['Text']])
        self.processed_data[split_name] = {'x': features, 'y': targets}
    # Re-point _target_data at the freshly built data for the active split.
    self.set_split(self.split_)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def TransformData(text):\n global COUNT_VECTORIZER\n if COUNT_VECTORIZER is None:\n COUNT_VECTORIZER = CountVectorizer(analyzer = 'word', lowercase = True)\n COUNT_VECTORIZER.fit(text)\n features = COUNT_VECTORIZER.transform(text)\n features_nd = features.toarray() # for easy usage\n ...
[ "0.7076867", "0.70379716", "0.67895293", "0.67181784", "0.6577113", "0.64362943", "0.63970965", "0.6308338", "0.6303764", "0.6243157", "0.6215836", "0.62142926", "0.61975324", "0.6172654", "0.61442214", "0.61173296", "0.6084646", "0.6082078", "0.6061634", "0.6042774", "0.6021...
0.61769956
13
Sets the current active partition.
def set_split(self, split='train'):
    """Activate the given partition ('train' by default) as the target data."""
    self._target_data = self.processed_data[split]
    self.split_ = split
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setActive(self, active):\n\n self._active = active", "def set_active(self, active):\n self._active = active", "def set_active(self, active):\n self.active = active", "def active(self, active):\n\n self._active = active", "def active(self, active):\n\n self._active = a...
[ "0.6547811", "0.65143806", "0.6484135", "0.6361823", "0.6361823", "0.6361823", "0.6361823", "0.6254472", "0.6218527", "0.6194217", "0.61550754", "0.61421317", "0.61362016", "0.5958744", "0.5954031", "0.5954031", "0.5954031", "0.5954031", "0.5954031", "0.58707124", "0.5751636"...
0.0
-1
If data has not been processed, calls process_data. Returns None.
def check_Data(self):
    """Ensure the feature data has been processed.

    If no target data is active yet, run ``process_data`` to build it.
    Returns None.
    """
    if self._target_data is None:
        # BUGFIX: was self.processData(), which does not exist — the
        # processing method defined on this class is process_data.
        self.process_data()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(proc_data):\n\n # No further processing\n return proc_data", "def process_data(self, data):\n return data", "def process_data_impl(\n self,\n data_dir: Path,\n output_processed_data_dir: Path,\n ) -> NoReturn:\n pass", "def run(self, data):\...
[ "0.78714263", "0.7426427", "0.7234299", "0.72270745", "0.71373206", "0.69578606", "0.67860335", "0.67860335", "0.67753035", "0.67753035", "0.67753035", "0.67753035", "0.67753035", "0.67753035", "0.66798294", "0.6571338", "0.6558113", "0.6555273", "0.6539222", "0.64652324", "0...
0.70434296
5
Determines the number of batches.
def get_num_batches(self, batch_size):
    """Return how many complete batches of ``batch_size`` the dataset holds."""
    total_examples = len(self)
    return total_examples // batch_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_of_batches(self):\n return int(np.floor(len(self.file_paths_list) / self.batch_size))", "def batch_size(self) -> int:\n ...", "def num_batches(self):\n\t\t\n\t\treturn len(self.batch_stats)", "def _update_num_batches(self):\n # maximum possible number of batches is equal to nu...
[ "0.8359936", "0.8323407", "0.80367476", "0.7839606", "0.778999", "0.77374494", "0.76503813", "0.7589813", "0.752585", "0.7504559", "0.7504559", "0.7504559", "0.7504559", "0.7446192", "0.7433917", "0.7410418", "0.73296297", "0.7293308", "0.7237539", "0.7229498", "0.7215725", ...
0.82500005
2
Returns the number of features in the processed data. Returns int Feature size.
def get_num_features(self):
    """Return the feature-vector length of the processed data (int)."""
    first_example = self[0]
    return len(first_example['x'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def features_size(self) -> int:\n return len(self.data[0].features) if len(self.data) > 0 and self.data[0].features is not None else None", "def getNrFeatures(self):\n return self.featureNames.size", "def num_features(self):\n if self.x is None:\n return 0\n return 1 if s...
[ "0.8408814", "0.83368707", "0.8241575", "0.8142289", "0.80863315", "0.80641556", "0.8048011", "0.79128486", "0.7869567", "0.7836357", "0.78265285", "0.78225327", "0.77721834", "0.7672442", "0.75243306", "0.74740946", "0.7469065", "0.7446943", "0.74352556", "0.7432964", "0.742...
0.8641283
0
Returns a list of the class labels. Returns: list — the distinct class labels.
def get_class_labels(self):
    """Return the distinct class labels present in the active split.

    Tensors are reduced with ``unique()``; anything else is deduplicated
    via a set and returned sorted.
    """
    targets = self.get_data()['y']
    if type(targets) is torch.Tensor:
        return targets.unique().numpy()
    return sorted(set(targets))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def class_labels(self):\n return self._class_labels", "def classes(self) -> List[Any]:\n return list(self.label_counts.keys())", "def get_labels(self) -> List[str]:\n return self.labels", "def get_labels(self) -> List[str]:\n raise NotImplementedError()", "def label_names(self) ...
[ "0.84051836", "0.7856161", "0.7742177", "0.7669178", "0.7610997", "0.757372", "0.754162", "0.75304675", "0.7472855", "0.72751015", "0.72687674", "0.71715975", "0.71615297", "0.71615297", "0.71615297", "0.71615297", "0.71615297", "0.71615297", "0.71392363", "0.71387213", "0.71...
0.73184854
9
Returns the index corresponding to the given class label.
def lookup_class_idx(self, label):
    """Return the numeric index registered for the given class label."""
    idx = self.class_labels[label]
    return idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_class_index(self, label):\n assert label in CLASSES\n return CLASSES.index(label)", "def get_class_index(label):\n if isinstance(label,str) is False:\n basic.outputlogMessage('input label must be a string')\n assert(False)\n length = len(class_label)\n for i in range(...
[ "0.8939161", "0.8781351", "0.8290658", "0.79299194", "0.75193024", "0.7453748", "0.7260702", "0.7260702", "0.7200521", "0.7144214", "0.6997018", "0.6986183", "0.6928649", "0.68628794", "0.67985356", "0.6562468", "0.64826816", "0.6404319", "0.6269193", "0.6264292", "0.62591416...
0.9071037
0
Returns ndarrays or Tensors of all data in the current split.
def get_data(self, split=None, numpy=True):
    """Return every example of a split as one batch.

    Args:
        split: optional split name to read; defaults to the active split.
        numpy: if True, convert features/targets to numpy ndarrays.

    Returns:
        dict with 'x' (features) and 'y' (targets) spanning the whole split.
    """
    def _to_numpy(value):
        # Tensors must be detached before numpy conversion; everything
        # else goes through np.array.
        if type(value) == torch.Tensor:
            return value.detach().numpy()
        return np.array(value)

    previous_split = None
    if split is not None:
        previous_split = self.split_
        self.set_split(split)

    # One batch that covers the entire split.
    dataloader = DataLoader(self, batch_size=len(self), shuffle=False,
                            drop_last=False)
    for i, data_item in enumerate(dataloader):
        assert i == 0  # batch_size == len(self) guarantees a single batch
        x = data_item['x']
        y = data_item['y']

    if numpy:
        x = _to_numpy(x)
        y = _to_numpy(y)

    if split is not None:
        # BUGFIX: restore via set_split so _target_data switches back
        # together with split_ (previously only split_ was restored,
        # leaving _target_data on the requested split).
        self.set_split(previous_split)
    return {'x': x, 'y': y}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_data(self) -> Optional[np.ndarray]:\n if self._data_store is None:\n return None\n return self._data_store[:self._count, :]", "def full_batch(self):\n return self.X_data, self.Y_data", "def getNdArray(self):\n futures = self.client.map(_call_getNdArray, self.vecDask, pure=False...
[ "0.62569696", "0.618584", "0.6073904", "0.6042331", "0.5926986", "0.5832924", "0.5821185", "0.5761303", "0.5756968", "0.57559574", "0.57297504", "0.57242715", "0.57101953", "0.570451", "0.5688236", "0.56806684", "0.56590205", "0.564544", "0.56339717", "0.55949557", "0.5587822...
0.5634289
18
Partitions the full data into a list of ndarrays/Tensors.
def get_n_folds(self, split=None, N=5, numpy=True, perm=None):
    """Shuffle a split's data and partition it into N equal folds.

    Args:
        split: optional split name; defaults to the active split.
        N: number of folds; len(data) must divide evenly by N (np.split).
        numpy: forwarded to ``get_data``.
        perm: optional index permutation; a random one is drawn when None.

    Returns:
        list of N dicts, each with 'x' and 'y' arrays.

    Raises:
        Exception: if ``perm`` does not match the dataset length.
    """
    data = self.get_data(split, numpy)
    X = data['x']
    y = data['y']
    size = len(y)
    if perm is None:
        perm = np.random.permutation(size)
    elif len(perm) != size:
        raise Exception("Permutation provided is wrong length: " +
                        str(len(perm)) + " vs " + str(size))
    X = X[perm, :]
    # BUGFIX: index with perm alone so 1-D label arrays work too —
    # y[perm, :] raised IndexError for 1-D y; for 2-D it is equivalent.
    y = y[perm]
    x_folds = np.split(X, N, axis=0)
    y_folds = np.split(y, N, axis=0)
    return [{'x': x_folds[i], 'y': y_folds[i]} for i in range(N)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _batchify(data: nd.NDArray, batch_size):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = len(data) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data[0: nbatch * batch_size]\n # Evenly divide the d...
[ "0.6259908", "0.62205553", "0.60901254", "0.6081688", "0.6013226", "0.6012804", "0.600478", "0.5999807", "0.59407836", "0.5916167", "0.59141505", "0.5829531", "0.5814003", "0.5805772", "0.58050174", "0.5766101", "0.5750351", "0.5743768", "0.5734869", "0.57283336", "0.57187784...
0.0
-1
Applies a function mapping to each element in the feature data.
def apply_fn(self, fn):
    """Apply ``fn`` element-wise to the feature vectors of every split."""
    self.check_Data()
    for split_entry in self.processed_data.values():
        transformed = np.array([fn(feature) for feature in split_entry['x']])
        split_entry['x'] = transformed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map(self, function=lambda value: value):\n for j, value in enumerate(self):\n self[j] = function(value)", "def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)", "def map(sel...
[ "0.73664135", "0.7220546", "0.70397", "0.70174754", "0.685345", "0.684726", "0.68348914", "0.6801942", "0.6742367", "0.6730765", "0.6695242", "0.6670184", "0.6574957", "0.6559686", "0.6507356", "0.6469651", "0.6460323", "0.6443097", "0.64101386", "0.64018846", "0.63930345", ...
0.7359208
1
Converts a string of text into a numerical vector of features based on the word embedding LTM.
def vectorize(self, text):
    """Encode ``text`` as a binary feature vector over the LTM node list.

    Each token present in the word-embedding tree activates that word's
    ancestor nodes; the returned vector marks which nodes were activated.
    """
    active_nodes = set()
    for token in word_tokenize(text):
        if token in self.tree:
            active_nodes.update(self.tree.word_ancestors(token))
    return self.nl.isin(active_nodes).values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in id...
[ "0.70021975", "0.6557391", "0.65419817", "0.65404963", "0.6515521", "0.64831823", "0.6474055", "0.64408255", "0.64127994", "0.6378021", "0.63599265", "0.63383466", "0.6331677", "0.6309186", "0.63056415", "0.6245646", "0.6237467", "0.62303", "0.62136185", "0.61973566", "0.6189...
0.0
-1
Calls the super method and adds an MLTM vector to the data dict.
def vectorize(self, source_text, target_text, use_dataset_max_lengths=True):
    """Vectorize a source/target pair and attach the MLTM source vector.

    Delegates to the parent vectorizer, then adds an
    'x_source_mltm_vector' entry computed from the lowercased source text.
    """
    data = super().vectorize(source_text, target_text, use_dataset_max_lengths)
    mltm_vector = self.mltm_vectorizer.vectorize(source_text.lower())
    data["x_source_mltm_vector"] = mltm_vector.astype(np.float32)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_data(self, v, m, x, pos=1):\n if x is not None:\n if v in self.variables:\n if m in self.models:\n self.data.update({self.__gen_key(m, v, pos): x})\n self.pos.update({self.__gen_key(m, v, pos): pos})\n else:\n ...
[ "0.62161565", "0.60280395", "0.5786776", "0.5678434", "0.5514574", "0.54189706", "0.538351", "0.53775567", "0.5377541", "0.53738487", "0.53701663", "0.5347109", "0.53244966", "0.52985543", "0.52572984", "0.5225952", "0.5215003", "0.52027744", "0.5197203", "0.51853836", "0.518...
0.0
-1
the primary entry point method for PyTorch datasets
def __getitem__(self, index):
    """Return one vectorized example (the PyTorch Dataset entry point)."""
    example = self._target_df.iloc[index]
    vectors = self._vectorizer.vectorize(example.source_language,
                                         example.target_language)
    return {
        "x_source": vectors["source_vector"],
        "x_target": vectors["target_x_vector"],
        "y_target": vectors["target_y_vector"],
        "x_source_length": vectors["source_length"],
        "x_source_mltm_vector": vectors["x_source_mltm_vector"],
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), tra...
[ "0.71006465", "0.67667514", "0.67667514", "0.6723365", "0.6689075", "0.66602546", "0.66412747", "0.6627261", "0.65898776", "0.658745", "0.6563074", "0.6560465", "0.65538913", "0.6544277", "0.6510383", "0.6499532", "0.6479281", "0.6478729", "0.64679545", "0.6463354", "0.645740...
0.0
-1
Generates a new MLP using the nn.Sequential class. Returns the generated nn.Sequential model.
def generate(self):
    """Assemble and return a new MLP as an ``nn.Sequential``.

    Layer plan: n_features -> hidden_sizes[0] -> ... -> hidden_sizes[-1]
    -> n_classes, with the configured activation (and optional dropout)
    after every hidden layer. Prints the learnable parameter count.
    """
    layers = []
    layer_sizes = [self.n_features] + list(self.hidden_sizes)
    for fan_in, fan_out in zip(layer_sizes[:-1], layer_sizes[1:]):
        layers.append(nn.Linear(fan_in, fan_out))
        self._activation(layers, self.activation)
        self._dropout(layers, self.dropout)
    # Output projection: last hidden size to the number of classes.
    layers.append(nn.Linear(self.hidden_sizes[-1], self.n_classes))
    mlp = nn.Sequential(*layers)
    num_params = sum(p.numel() for p in mlp.parameters() if p.requires_grad)
    print("Created MLP with " + str(num_params) + " learnable params")
    return mlp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_mlp_model():\n return snt.Sequential([\n snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),\n snt.LayerNorm()\n ])", "def mlp_model(self):\n\n model = Sequential()\n model.add(Dense(self.dense1, input_shape=(784,)))\n model.add(Activation(self...
[ "0.7659937", "0.75626165", "0.74614346", "0.7456196", "0.7027464", "0.70230585", "0.6835712", "0.67514044", "0.67060864", "0.6647528", "0.6643573", "0.659611", "0.6572788", "0.6491478", "0.6484547", "0.6470002", "0.64629227", "0.64441985", "0.64162695", "0.6321424", "0.628487...
0.8253452
0
Creates a new activation function and adds it to the list of components.
def _activation(self,components,activation): if activation == "ReLU": components.append(nn.ReLU()) elif activation == "Sigmoid": components.append(nn.Sigmoid()) else: raise Exception("Invalid activation fn: "+activation)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_activation_function(self):\n # Add the activation function\n if not self.activation_function is None:\n # Check if it is a string\n if isinstance(self.activation_function, str):\n activation_function = get_activation_function_by_name(\n ...
[ "0.7639841", "0.6542784", "0.61630833", "0.6001237", "0.6001181", "0.59439", "0.591768", "0.5908562", "0.58893776", "0.58730316", "0.5850897", "0.5848719", "0.58140385", "0.58140385", "0.5795392", "0.5792487", "0.5785407", "0.57467926", "0.57267064", "0.5675934", "0.5668909",...
0.7331862
1
Adds a dropout object to the list of components
def _dropout(self,components,dropout=None): if dropout is not None: components.append(nn.Dropout(dropout))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, component) -> None:\n pass", "def add_component(self, componentInstance):\n\n #print \"Componet being added to %s entity.\"%(self._sName)\n #print componentInstance\n \n self._dComponents[componentInstance.get_name()] = componentInstance\n\n #These if state...
[ "0.63641316", "0.56994003", "0.5660805", "0.5521632", "0.5519748", "0.5501247", "0.54634714", "0.5435919", "0.53915066", "0.5373497", "0.53394043", "0.53063345", "0.5302059", "0.52375495", "0.52294815", "0.5180793", "0.51650107", "0.51634353", "0.515684", "0.51533777", "0.514...
0.673964
0
Sets model, data, and training algo parameters.
def __init__(self,model,dataset,args):
    """Set model, data, and training-algorithm parameters.

    Wires up the optimizer (Adam), an LR scheduler that halves the rate
    when validation loss plateaus, the padding mask index, the training
    state dict, and tqdm progress bars for epochs/train/val.

    Args:
        model: the module to train; moved to ``args.device``.
        dataset: dataset exposing get_vectorizer / set_split / get_num_batches.
        args: namespace with device, learning_rate, num_epochs, batch_size.
    """
    self.args = args
    self.dataset = dataset
    self.model = model.to(args.device)
    self.optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    # Halve the learning rate after 1 epoch without val-loss improvement.
    self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=self.optimizer,
                                                          mode='min', factor=0.5,
                                                          patience=1)
    vectorizer = dataset.get_vectorizer()
    # Mask index used to ignore padding in loss/accuracy computations.
    self.mask_index = vectorizer.target_vocab.mask_index
    self.train_state = make_train_state(args)
    self.epoch_bar = tqdm_notebook(desc='training routine',
                                   total=args.num_epochs,
                                   position=0)
    # Bar totals depend on the split sizes, so each split is activated
    # before querying its batch count; the dataset is left on 'val'.
    self.dataset.set_split('train')
    self.train_bar = tqdm_notebook(desc='split=train',
                                   total=dataset.get_num_batches(args.batch_size),
                                   position=1,
                                   leave=True)
    self.dataset.set_split('val')
    self.val_bar = tqdm_notebook(desc='split=val',
                                 total=dataset.get_num_batches(args.batch_size),
                                 position=1,
                                 leave=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n ...
[ "0.6915247", "0.6885677", "0.6774289", "0.67406434", "0.6733088", "0.6642049", "0.6622964", "0.65708476", "0.6508057", "0.64983726", "0.6465158", "0.6464744", "0.6418009", "0.6410264", "0.6372147", "0.63709724", "0.636245", "0.63596547", "0.63532597", "0.6346492", "0.63230634...
0.0
-1
Runs the training algorithm. Returns None.
def train(self):
    """Run the full training loop with early stopping.

    For each epoch: run a training pass and a validation pass, record the
    losses/accuracies in the train state, step the LR scheduler on the
    latest validation loss, and stop early when the train state says so.
    Finally reload the best checkpoint saved by update_train_state.

    Returns:
        The model with the best saved weights loaded.
    """
    args = self.args
    model = self.model
    dataset = self.dataset
    train_state = self.train_state
    optimizer = self.optimizer
    scheduler = self.scheduler
    train_bar = self.train_bar
    val_bar = self.val_bar
    epoch_bar = self.epoch_bar
    for epoch_index in range(args.num_epochs):
        train_state['epoch_index'] = epoch_index
        # Iterate over training dataset
        running_loss,running_acc = self.train_loop(epoch_index,
                                                   args,
                                                   model,
                                                   dataset,
                                                   optimizer,
                                                   train_bar)
        train_state['train_loss'].append(running_loss)
        train_state['train_acc'].append(running_acc)
        # Iterate over validation dataset
        running_loss,running_acc = self.val_loop(epoch_index,
                                                 args,
                                                 model,
                                                 dataset,
                                                 optimizer,
                                                 val_bar)
        train_state['val_loss'].append(running_loss)
        train_state['val_acc'].append(running_acc)
        print("Epoch "+str(epoch_index+1)+": Running loss="+ \
              str(running_loss)+", Running Acc="+str(running_acc))
        # Checkpointing / early-stopping bookkeeping.
        train_state = update_train_state(args=args, model=model,
                                         train_state=train_state)
        # Scheduler reacts to the most recent validation loss.
        scheduler.step(train_state['val_loss'][-1])
        if train_state['stop_early']:
            break
        # Reset the per-epoch progress bars.
        train_bar.n = 0
        val_bar.n = 0
        epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )
        epoch_bar.update()
    # Reload the best weights saved during training.
    state_dict = torch.load(train_state['model_filename'])
    model.load_state_dict(state_dict)
    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train():\n # YOUR TRAINING CODE GOES HERE", "def train():\n pass", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n ...
[ "0.77021474", "0.7665013", "0.75775045", "0.754038", "0.74701804", "0.7316239", "0.72936034", "0.7281923", "0.7249765", "0.7234686", "0.72101897", "0.7197399", "0.7190534", "0.71843356", "0.7157707", "0.7157707", "0.7157707", "0.7157707", "0.7157707", "0.71547514", "0.7094425...
0.0
-1
Runs the training for a single epoch.
def train_loop(self,epoch_index,args,model,dataset,optimizer,train_bar):
    """Run one training epoch over the 'train' split.

    Args:
        epoch_index: current epoch number (for the progress bar).
        args: namespace providing batch_size and device.
        model: the NMT model (optionally an NMTModelWithMLTM).
        dataset: dataset with set_split; batched via generate_nmt_batches.
        optimizer: optimizer stepped once per batch.
        train_bar: tqdm bar updated per batch.

    Returns:
        (running_loss, running_acc) — running means over the epoch.
    """
    dataset.set_split('train')
    batch_generator = generate_nmt_batches(dataset,
                                           batch_size=args.batch_size,
                                           device=args.device)
    running_loss = 0.0
    running_acc = 0.0
    model.train()
    for batch_index, batch_dict in enumerate(batch_generator):
        # the training routine is these 5 steps:
        # --------------------------------------
        # step 1. zero the gradients
        optimizer.zero_grad()
        # step 2. compute the output (MLTM variant takes an extra vector)
        if isinstance(model,NMTModelWithMLTM):
            y_pred = model(batch_dict['x_source'],
                           batch_dict['x_source_mltm_vector'],
                           batch_dict['x_source_length'],
                           batch_dict['x_target'])
        else:
            y_pred = model(batch_dict['x_source'],
                           batch_dict['x_source_length'],
                           batch_dict['x_target'])
        # step 3. compute the loss (padding masked via mask_index)
        loss = sequence_loss(y_pred, batch_dict['y_target'], self.mask_index)
        # step 4. use loss to produce gradients
        loss.backward()
        # step 5. use optimizer to take gradient step
        optimizer.step()
        # -----------------------------------------
        # compute the running loss and running accuracy (running mean)
        running_loss += (loss.item() - running_loss) / (batch_index + 1)
        acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)
        running_acc += (acc_t - running_acc) / (batch_index + 1)
        # update bar
        train_bar.set_postfix(loss=running_loss,
                              acc=running_acc,
                              epoch=epoch_index)
        train_bar.update()
    return running_loss,running_acc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_one_epoch(self):\n raise NotImplementedError", "def train_one_epoch(self):\n\t\tself.model.train()\n\t\ttrain_loss = 0\n\n\t\tfor batch_idx, data in enumerate(self.data_loader.train_loader):\n\t\t\tInput = data[0].float().to(self.device)\n\t\t\tOutput = data[1].float().to(self.device)\n\n\t\t\ts...
[ "0.8152491", "0.7976923", "0.78916997", "0.78650874", "0.78335357", "0.78335357", "0.78335357", "0.78335357", "0.7772364", "0.7753938", "0.77308905", "0.7689781", "0.76347524", "0.7558736", "0.75391996", "0.75388575", "0.75285155", "0.7487567", "0.7441571", "0.7418234", "0.73...
0.0
-1
Evaluates the model on the validation set.
def val_loop(self,epoch_index,args,model,dataset,optimizer,val_bar):
    """Evaluate the model over the 'val' split for one epoch.

    No gradient steps are taken; ``optimizer`` is accepted for signature
    symmetry with train_loop but is never used here.
    NOTE(review): evaluation runs without torch.no_grad() — confirm whether
    gradient tracking is intended.

    Returns:
        (running_loss, running_acc) — running means over the epoch.
    """
    dataset.set_split('val')
    batch_generator = generate_nmt_batches(dataset,
                                           batch_size=args.batch_size,
                                           device=args.device)
    running_loss = 0.0
    running_acc = 0.0
    model.eval()
    for batch_index, batch_dict in enumerate(batch_generator):
        # step 1. compute the output (MLTM variant takes an extra vector)
        if isinstance(model,NMTModelWithMLTM):
            y_pred = model(batch_dict['x_source'],
                           batch_dict['x_source_mltm_vector'],
                           batch_dict['x_source_length'],
                           batch_dict['x_target'])
        else:
            y_pred = model(batch_dict['x_source'],
                           batch_dict['x_source_length'],
                           batch_dict['x_target'])
        # step 2. compute the loss (padding masked via mask_index)
        loss = sequence_loss(y_pred, batch_dict['y_target'], self.mask_index)
        # -----------------------------------------
        # compute the running loss and running accuracy (running mean)
        running_loss += (loss.item() - running_loss) / (batch_index + 1)
        acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)
        running_acc += (acc_t - running_acc) / (batch_index + 1)
        # update bar
        val_bar.set_postfix(loss=running_loss,
                            acc=running_acc,
                            epoch=epoch_index)
        val_bar.update()
    return running_loss,running_acc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self):\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in enumerate(data_loa...
[ "0.7588236", "0.712658", "0.7041165", "0.70291466", "0.68915606", "0.6771378", "0.67110956", "0.6702142", "0.66912436", "0.66912436", "0.66912436", "0.6676459", "0.6667484", "0.6665729", "0.6603032", "0.6589405", "0.6569131", "0.6567912", "0.655075", "0.6545247", "0.65360427"...
0.0
-1
Tests the model on the test set, measuring accuracy. Returns float Total accuracy of the model on the test set.
def test(self): args = self.args model = self.model dataset = self.dataset dataset.set_split('test') batch_generator = generate_nmt_batches(dataset, batch_size=len(dataset), device=args.device) acc_sum = 0.0 model.eval() for batch_index, batch_dict in enumerate(batch_generator): # step 1. compute the output if isinstance(model,NMTModelWithMLTM): y_pred = model(batch_dict['x_source'], batch_dict['x_source_mltm_vector'], batch_dict['x_source_length'], batch_dict['x_target']) else: y_pred = model(batch_dict['x_source'], batch_dict['x_source_length'], batch_dict['x_target']) acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index) acc_sum += acc_t return acc_sum / (batch_index+1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_test_accuracy(self):\n print('\\n# Evaluate on test data')\n results = self.model.evaluate(self.data.test_dataset)\n print('\\ntest loss, test acc:', results)", "def get_accuracy(self) -> float:\n self.network.load_data()\n self.network.train()\n\n n = len(self.network.y_t...
[ "0.78987217", "0.7703377", "0.76911086", "0.76736206", "0.7662402", "0.7599939", "0.75526947", "0.7543788", "0.7528232", "0.7517148", "0.74728966", "0.74700576", "0.74215174", "0.73989594", "0.73596543", "0.7353871", "0.7339963", "0.73214674", "0.73190075", "0.7318403", "0.72...
0.71598864
25
Runs a training procedure on a PyTorch module using the dataset and loss function.
def train_model(self,model): train_state = {'stop_early': False, 'early_stopping_step': 0, 'early_stopping_best_val': 1e8, 'learning_rate': self.lr, 'epoch_index': 0, 'train_loss': [], 'val_loss': [], 'best_model':model} dataset = self.dataset loss_fn = self.loss_fn dataset.set_split('train') print("Training module with "+str(len(dataset))+" examples") data_loader = DataLoader(dataset,batch_size=self.batch_size,shuffle=True, drop_last=True) optimizer = optim.Adam(model.parameters(), lr=self.lr) for epoch in range(self.epochs): train_state['epoch_index'] = epoch #First step in each epoch is to train over all batches model.train() dataset.set_split('train') train_loss = 0 for b_i,batch_data in enumerate(data_loader): #Step 1: zero gradients optimizer.zero_grad() #Step 2: run forward X = batch_data['x'] output = model(X) #Step 3: compute loss target = batch_data['y'] loss = loss_fn(output,target) #Step 4: run backward loss.backward() #Step 5: update optimizer.step() #Record accumulated loss new_loss = loss.item() train_loss += new_loss train_loss /= b_i train_state['train_loss'].append(train_loss) #After training, compute loss on validation set and check for early stop model.eval() dataset.set_split('val') val_loss = 0 for b_i,batch_data in enumerate(data_loader): #Step 1: run forward X = batch_data['x'] output = model(X) #Step 2: compute loss target = batch_data['y'] loss = loss_fn(output,target) #Record accumulated loss new_loss = loss.item() val_loss += new_loss val_loss /= b_i train_state['val_loss'].append(val_loss) print("Finished epoch "+str(epoch+1)+". 
Train loss="+\ str(train_loss)+", Val loss="+str(val_loss)) if val_loss < train_state['early_stopping_best_val']: #new best model, reset stopping counter, store model train_state['early_stopping_step'] = 0 train_state['early_stopping_best_val'] = val_loss best_model = copy.deepcopy(model) best_model.load_state_dict(model.state_dict()) train_state['best_model'] = best_model else: #val loss not improved; increase early stopping counter train_state['early_stopping_step'] += 1 if train_state['early_stopping_step'] >= self.early_stopping_criteria: train_state['stop_early'] = True print("Val loss failed to improve. Stopping early.") break return train_state['best_model'],train_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = ...
[ "0.7443008", "0.7346822", "0.7322582", "0.7247038", "0.6962793", "0.6957798", "0.6951179", "0.68994415", "0.6894855", "0.68578106", "0.684405", "0.6805708", "0.6804601", "0.67843026", "0.6783091", "0.67713857", "0.6762238", "0.67368186", "0.6734932", "0.6691646", "0.6680826",...
0.0
-1
Generates predictions and attentions for a batch.
def apply_to_batch(self, batch_dict): self._last_batch = batch_dict if isinstance(self.model,NMTModelWithMLTM): y_pred = self.model(x_source=batch_dict['x_source'], x_mltm=batch_dict['x_source_mltm_vector'], x_source_lengths=batch_dict['x_source_length'], target_sequence=batch_dict['x_target']) else: y_pred = self.model(x_source=batch_dict['x_source'], x_source_lengths=batch_dict['x_source_length'], target_sequence=batch_dict['x_target']) self._last_batch['y_pred'] = y_pred attention_batched = np.stack(self.model.decoder._cached_p_attn).transpose(1, 0, 2) self._last_batch['attention'] = attention_batched
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):\n raise NotImplementedError", "def predict_on_batch(engine, batch):\n\t\tengine.model.eval()\n\t\tengine.model.rpn.nms_thresh = 0.3\n\t\twith torch.no_grad():\n\t\t\timgs, target = prepare_batch(batch, device=get_device(...
[ "0.7235579", "0.7028992", "0.70033675", "0.69987303", "0.6998135", "0.6970026", "0.69600385", "0.6889559", "0.67980856", "0.6786844", "0.6781948", "0.6712437", "0.6697618", "0.66913795", "0.66129225", "0.65865463", "0.6565857", "0.6542319", "0.6535848", "0.65276676", "0.65126...
0.6662756
14
Splits a DataFrame into 3 distinct DataFrames based on the given percentages and returns a dict of the data.
def split_data(text_df,splits=None,rand_perm=True): if splits is None: splits = {'train':0.6,'val':0.1,'test':0.3} if np.round(np.sum(list(splits.values())),4) != 1: raise Exception("Split percentages do not sum to 1") size = len(text_df) if rand_perm: perm_idx = np.random.permutation(size) else: perm_idx = np.arange(size) text_df = text_df.iloc[perm_idx,:] all_data = dict() keys = list(splits.keys()) pct = list(splits.values()) count = np.round(np.array(pct) * size).astype(np.int32) split_idx = np.cumsum(count)[:-1] data_list = np.split(text_df,split_idx,axis=0) all_data = {keys[i]:data for i,data in enumerate(data_list)} return all_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_data(df_data, clusters):\n\n if clusters is None:\n\n return {0: df_data}\n\n return {\n k: df_data.loc[clusters.index[clusters == k]]\n for k in clusters.unique()\n }", "def split_train_dev_set(df, percent=0.2):\n train = []\n dev = []\n for k, g in df.groupby(\"...
[ "0.59946746", "0.5814314", "0.5597616", "0.55113435", "0.5504025", "0.54890805", "0.5455985", "0.54370016", "0.54187405", "0.54187405", "0.54108846", "0.54088694", "0.5390827", "0.5368966", "0.53635633", "0.53455263", "0.53044295", "0.5286926", "0.52688", "0.5268422", "0.5253...
0.58939797
1
Performs a standard classification test with the given classifier.
def classify(dataset,classifier,feat_mask=None): train = dataset.get_data('train',True) X_train = train['x'] if feat_mask is not None: X_train = X_train[:,feat_mask] y_train = train['y'] classifier.fit(X_train,y_train) test = dataset.get_data('test',True) X_test = test['x'] if feat_mask is not None: X_test = X_test[:,feat_mask] y_test = test['y'] pred = classifier.predict(X_test) acc = np.count_nonzero(pred==y_test) / len(y_test) return acc,y_test,pred
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runClassifier(clf,title,xtrain,ytrain,xtest,ytest):\n # train the model using the classifier's fit function\n # use a dummy variable to avoid gibberish being printed\n clf.fit(xtrain, ytrain)\n\n # use the model to predict labels for the test set\n # note: this step is redundant if you just want...
[ "0.71080446", "0.7000874", "0.685494", "0.66270775", "0.66078466", "0.6513761", "0.64321357", "0.6407826", "0.63719994", "0.636399", "0.635611", "0.62839556", "0.6274965", "0.6272943", "0.62652", "0.62624466", "0.6240519", "0.62150085", "0.6208279", "0.6197915", "0.61721116",...
0.6559476
5
Reads a English > French text file and filters the lines based on the given filter_fn. If filter_fn is None, the default filter will be
def filter_nmt_file(filename,filter_fn=None): if filter_fn is None: filter_fn = lambda en : en.lower().startswith('i am') or \ en.lower().startswith('he is') or \ en.lower().startswith('she is') or \ en.lower().startswith('they are') or \ en.lower().startswith('you are') or \ en.lower().startswith('we are') filtered_lines = [] with open(filename) as file: lines = file.readlines() for line in lines: text = line.split('\t') en = text[0] fra = text[1] if filter_fn(en): filtered_lines.append(en.lower() + '\t' + fra.lower()) return filtered_lines
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_filter(self, fname, interp=True, lamb=None, *args, **kwargs):\n try:\n fil = UnitFilter.from_ascii(fname, *args, **kwargs)\n except Exception:\n content = self.content\n r = [k for k in content if fname in k]\n\n if len(r) <= 0: # try all lower f...
[ "0.594911", "0.5432073", "0.5307598", "0.5264026", "0.5253835", "0.52506196", "0.5130263", "0.49939936", "0.49880716", "0.49808767", "0.4957064", "0.494303", "0.49124965", "0.49102247", "0.49089", "0.48704535", "0.48687115", "0.48383683", "0.48080763", "0.47974768", "0.479605...
0.73677015
0
Given a list of lines of English/French text, creates a DataFrame with train/val/test split labels.
def create_nmt_data(text,train_pct=0.7,val_pct=0.15): if train_pct + val_pct >= 1: raise Exception("train_pct + val_pct must be < 1.0") source = [] target = [] for line in text: text = line.split('\t') source.append(text[0]) target.append(text[1]) text_df = pd.DataFrame({'source_language':source,'target_language':target}) text_df['split'] = 'train' text_df = text_df.sample(frac=1).reset_index(drop=True) idx = int(len(text_df)*train_pct) text_df.loc[:idx,'split'] = 'train' idx2 = idx + int(len(text_df)*val_pct) text_df.loc[idx:idx2,'split'] = 'val' text_df.loc[idx2:,'split'] = 'test' return text_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_training_data_file(list_of_word_lines, language):\r\n # To store each feature vector\r\n feature_vector = []\r\n\r\n # To store the entire dataset\r\n data = []\r\n\r\n for sentence in list_of_word_lines:\r\n\r\n # Contains Q\r\n CONTAINS_Q = 'N'\r\n\r\n # Contains Q\...
[ "0.6267126", "0.6177644", "0.6156957", "0.614642", "0.6129366", "0.612271", "0.6122403", "0.60591143", "0.5948062", "0.59287256", "0.592589", "0.5915757", "0.58811563", "0.5854081", "0.58518916", "0.5847722", "0.58189434", "0.58174044", "0.58112806", "0.58017504", "0.5774996"...
0.6742829
0
Reads a glove word embedding text file and generates a DataFrame with the embeddings.
def process_glove_data(filename): word_list = [] embed_list = [] with open(filename,encoding="utf8") as file: lines = file.readlines() for line in lines: toks = line.split(' ') word_list.append(toks[0]) vec = [float(tok) for tok in toks[1:]] embed_list.append(vec) embed = np.array(embed_list,dtype=float) embed_df = pd.DataFrame(embed,index=word_list) embed_df.index = embed_df.index.str.lower() return embed_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_embeddings(filename):\n labels = []\n rows = []\n with open(filename, encoding='utf-8') as infile:\n for i, line in enumerate(infile):\n items = line.rstrip().split(' ')\n if len(items) == 2:\n # This is a header row giving the shape of the matrix\n ...
[ "0.7637871", "0.72984564", "0.72906333", "0.7238396", "0.7030938", "0.6931128", "0.69254005", "0.6910939", "0.6879712", "0.6877971", "0.68639934", "0.6831232", "0.6824664", "0.68067014", "0.679522", "0.6794145", "0.6751845", "0.67482585", "0.67427385", "0.66889936", "0.666392...
0.7851951
0
Creates a Tensor for use as an Embedding initialization from the source vocabulary and predefined word embeddings.
def get_pretrained_embeddings(source_vocab,embed_df): num_tokens = len(source_vocab) embedding_dim = embed_df.shape[1] weights = np.zeros((num_tokens,embedding_dim),dtype=np.float32) for idx in range(num_tokens): token = source_vocab.lookup_index(idx) if token in embed_df.index: weights[idx,:] = embed_df.loc[token] else: weights[idx,:] = np.random.randn(1,embedding_dim) embed_tensor = torch.FloatTensor(weights) return embed_tensor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_embedding_layer(inputs_, vocab_size, embed_size):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)\n \n return embed", "def init_word_embed(config):\n embedding_mat_val = np.load(config.wordembed_params)\n ...
[ "0.74340177", "0.7306449", "0.72952855", "0.70558393", "0.7044115", "0.68895066", "0.67812735", "0.6750932", "0.67492104", "0.67476356", "0.67427427", "0.6717155", "0.6711093", "0.6680487", "0.6676144", "0.6672202", "0.6647878", "0.6621589", "0.66202843", "0.65980107", "0.658...
0.66311824
17
Evaluates the trained model on the test set using the bleu_score method from NLTK.
def eval_nmt_bleu(model,dataset,vectorizer,args): model = model.eval().to(args.device) sampler = NMTSamplerWithMLTM(vectorizer, model) dataset.set_split('test') batch_generator = generate_nmt_batches(dataset, batch_size=args.batch_size, device=args.device) test_results = [] for batch_dict in batch_generator: sampler.apply_to_batch(batch_dict) for i in range(args.batch_size): test_results.append(sampler.get_ith_item(i, False)) bleu4 = np.array([r['bleu-4'] for r in test_results])*100 return np.mean(bleu4),bleu4
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_bleu_score(cls, predictions, targets, ticks=False, corpus=True):\n if ticks:\n ref_sentences = cls._ticks_to_sentences(targets)\n cand_sentences = cls._ticks_to_sentences(predictions)\n else:\n ref_sentences = [[str(x) for x in seq] for seq in predictions...
[ "0.7001942", "0.69130796", "0.68518585", "0.68095434", "0.6744196", "0.6740567", "0.6694026", "0.6630554", "0.6620787", "0.66182154", "0.65482765", "0.6532654", "0.65137815", "0.6487489", "0.64665717", "0.64459497", "0.6421892", "0.64201504", "0.6412638", "0.64110035", "0.639...
0.6284775
32
Selftest function will try to connect to the LDAP instance. Fail if any exceptions are raised.
def selftest_function(opts): domains_list = get_domains_list(opts) ldap = LDAPDomains(opts) state = "success" reason = "N/A" domain = "N/A" conn = "" for domain_name in domains_list: try: """ If labels are given to the servers in the app.config `domain_name` will start with 'fn_ldap_utilities:' else if labels are not given then `domain_name` will equal 'fn_ldap_utilites'. If `domain_name` contains ':' then a labels have been given to the servers and `domain` will be set to the label given to the server else if `domain_name` does not contain ':' then servers have not been labled and `domain` will be set to `domain_name` which will equal 'fn_ldap_utilities'. """ domain = domain_name[domain_name.index(":")+1:] if ":" in domain_name else domain_name # Instansiate helper (which gets appconfigs from file) helper = LDAPUtilitiesHelper(ldap.ldap_domain_name_test(domain, domains_list)) options = opts.get(domain_name, {}) log.info(f"Verifying app.config values for {str(options.get('ldap_server'))} config section") # Instansiate LDAP Server and Connection conn = helper.get_ldap_connection() # Bind to the connection log.info("Verifying LDAP connection...") conn.bind() log.info("Test was successful\n") except Exception as err: state = "failure" reason = err break finally: # Unbind connection if conn: conn.unbind() if state == "success": return {"state": state} return { "state": state, "reason": reason, "domain": domain }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_simple_auth_error(self):\n client = LDAPClient(self.url)\n client.set_credentials(\"SIMPLE\", (\"cn=wrong\", \"wronger\"))\n self.assertRaises(bonsai.AuthenticationError, client.connect)", "def connect(self):\n conf = self.conf\n\n if not conf.uris or not conf.base:\n ...
[ "0.7001593", "0.6606929", "0.65416896", "0.6496196", "0.64913446", "0.6485445", "0.6349957", "0.61097586", "0.6085969", "0.60859203", "0.6060914", "0.60118145", "0.6002777", "0.5980626", "0.59615344", "0.59526885", "0.59469163", "0.5924804", "0.5921622", "0.5895566", "0.58762...
0.6377324
6
Processes a list of splits by modifying any positions as needed.
def handle_splits(self, splits): total_leftover_cash = 0 for instrument, ratio in splits: if instrument in self.positions: self._dirty_stats = True # Make the position object handle the split. It returns the # leftover cash from a fractional share, if there is any. position = self.positions[instrument] leftover_cash = position.handle_split(instrument, ratio) total_leftover_cash += leftover_cash return total_leftover_cash
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(self, splits, catchall=False):\r\n raise NotImplementedError()", "def _setup_splits(self):\n #ntot = self.reredux_conf['nperfile']\n ntot = self.reredux_conf['Ngals']\n npersplit = self.runconf['nper']\n\n self.beglist, self.endlist = get_splits(ntot, npersplit)", "def ...
[ "0.69268346", "0.57384413", "0.5705373", "0.5634635", "0.56108665", "0.5545612", "0.5541551", "0.5517076", "0.5466351", "0.54659456", "0.54127634", "0.5412377", "0.53936285", "0.5370275", "0.53439856", "0.5326818", "0.53085357", "0.5286673", "0.5274086", "0.5254431", "0.52331...
0.6112161
1
Given a list of dividends whose ex_dates are all the next trading day, calculate and store the cash and/or stock payments to be paid on each dividend's pay date.
def earn_dividends(self, cash_dividends, stock_dividends): for cash_dividend in cash_dividends: self._dirty_stats = True # only mark dirty if we pay a dividend # Store the earned dividends so that they can be paid on the # dividends' pay_dates. div_owed = self.positions[cash_dividend.instrument].earn_dividend( cash_dividend, ) try: self._unpaid_dividends[cash_dividend.pay_date].append(div_owed) except KeyError: self._unpaid_dividends[cash_dividend.pay_date] = [div_owed] for stock_dividend in stock_dividends: self._dirty_stats = True # only mark dirty if we pay a dividend div_owed = self.positions[ stock_dividend.instrument ].earn_stock_dividend(stock_dividend) try: self._unpaid_stock_dividends[stock_dividend.pay_date].append( div_owed, ) except KeyError: self._unpaid_stock_dividends[stock_dividend.pay_date] = [ div_owed, ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pay_dividends(self, next_trading_day):\n net_cash_payment = 0.0\n\n try:\n payments = self._unpaid_dividends[next_trading_day]\n # Mark these dividends as paid by dropping them from our unpaid\n del self._unpaid_dividends[next_trading_day]\n except KeyError...
[ "0.6943807", "0.61015797", "0.59446824", "0.5888466", "0.575217", "0.57305545", "0.57155085", "0.55656976", "0.5565659", "0.55222803", "0.55169284", "0.54701203", "0.5462021", "0.53821945", "0.53751284", "0.53381366", "0.5314847", "0.53086144", "0.5304677", "0.529244", "0.526...
0.62604964
1
Returns a cash payment based on the dividends that should be paid out according to the accumulated bookkeeping of earned, unpaid, and stock dividends.
def pay_dividends(self, next_trading_day): net_cash_payment = 0.0 try: payments = self._unpaid_dividends[next_trading_day] # Mark these dividends as paid by dropping them from our unpaid del self._unpaid_dividends[next_trading_day] except KeyError: payments = [] # representing the fact that we're required to reimburse the owner of # the stock for any dividends paid while borrowing. for payment in payments: net_cash_payment += payment['amount'] # Add stock for any stock dividends paid. Again, the values here may # be negative in the case of short positions. try: stock_payments = self._unpaid_stock_dividends[next_trading_day] except KeyError: stock_payments = [] for stock_payment in stock_payments: payment_instrument = stock_payment['payment_instrument'] share_count = stock_payment['share_count'] # note we create a Position for stock dividend if we don't # already own the instrument if payment_instrument in self.positions: position = self.positions[payment_instrument] else: position = self.positions[payment_instrument] = Position( payment_instrument, ) position.amount += share_count return net_cash_payment
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cash_flow(self):\n _cash_flow = self.after_tax_profit() + self.depreciation()\n return _cash_flow", "def cash(self, qtt_100s, qtt_50s, qtt_20s):\n return (qtt_100s * 100) + (qtt_50s * 50) + (qtt_20s * 20)", "def test_discounted_payment_below_debit(self):\n debit_jobs([(self.job,...
[ "0.65616626", "0.6471666", "0.6241464", "0.6134051", "0.6083674", "0.6017929", "0.59529567", "0.59450686", "0.57963526", "0.5787191", "0.5777359", "0.57426834", "0.5719649", "0.56672525", "0.5616136", "0.5607058", "0.55491143", "0.5521608", "0.5502035", "0.5492246", "0.545119...
0.7525171
0
The current status of the positions. Returns
def stats(self): if self._dirty_stats: calculate_position_tracker_stats(self.positions, self._stats) self._dirty_stats = False return self._stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)", "def status(self):\n\t\treturn self._status", "def status(self):\n pass", "def status(self):\n pass", "def status(self):\n return self.state", "def status(self):", "def status(self):\...
[ "0.7082658", "0.7037973", "0.7028998", "0.7028998", "0.70281035", "0.70246303", "0.6955653", "0.6926556", "0.68866", "0.68866", "0.68487513", "0.68487513", "0.68325317", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", ...
0.0
-1
Add a transaction to ledger, updating the current state as needed.
def process_transaction(self, transaction): instrument = transaction.instrument if isinstance(instrument, Future): try: old_price = self._payout_last_sale_prices[instrument] except KeyError: self._payout_last_sale_prices[instrument] = transaction.price else: position = self.position_tracker.positions[instrument] amount = position.amount price = transaction.price self._cash_flow( self._calculate_payout( instrument.multiplier, amount, old_price, price, ), ) if amount + transaction.amount == 0: del self._payout_last_sale_prices[instrument] else: self._payout_last_sale_prices[instrument] = price else: self._cash_flow(-(transaction.price * transaction.amount)) self.position_tracker.execute_transaction(transaction) # we only ever want the dict form from now on transaction_dict = transaction.to_dict() try: self._processed_transactions[transaction.dt].append( transaction_dict, ) except KeyError: self._processed_transactions[transaction.dt] = [transaction_dict]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addTransaction(self, transaction):\n self.transactions.append(transaction)\n self.transactionIDs.add(transaction.id)", "def add(self, transaction):\n if isinstance(transaction, Transaction):\n # If the transaction already exists\n if(transaction.hash in self.transac...
[ "0.73368466", "0.7106181", "0.6942782", "0.66168725", "0.65834856", "0.6570095", "0.6479877", "0.64787537", "0.6422333", "0.64186", "0.6378191", "0.63757354", "0.63565505", "0.63033175", "0.630115", "0.6284071", "0.6255765", "0.62522966", "0.6241244", "0.6228167", "0.6210412"...
0.0
-1
Retrieve the dictform of all of the transactions in a given bar or for the whole simulation.
def transactions(self, dt=None): if dt is None: # flatten the by-day transactions return [ txn for by_day in itervalues(self._processed_transactions) for txn in by_day ] return self._processed_transactions.get(dt, [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transaction_data(self):\n return list(map(lambda transaction:transaction.to_json(), self.transaction_map.values()))", "def transaction_base() -> Dict[str, Any]:\n return {\n \"first_name\": \"Donald\",\n \"last_name\": \"Duck\",\n \"company\": \"Duck Co\",\n \"email\": \...
[ "0.63048834", "0.602332", "0.5960751", "0.56639814", "0.56584823", "0.5563182", "0.5531879", "0.55191296", "0.55072176", "0.5477383", "0.54718333", "0.5458796", "0.5426807", "0.54197216", "0.541453", "0.5338547", "0.5327534", "0.53225285", "0.53161556", "0.5279584", "0.526199...
0.0
-1
Force a computation of the current portfolio state.
def update_portfolio(self): if not self._dirty_portfolio: return portfolio = self._portfolio pt = self.position_tracker portfolio.positions = pt.get_positions() position_stats = pt.stats portfolio.positions_value = position_value = ( position_stats.net_value ) portfolio.positions_exposure = position_stats.net_exposure self._cash_flow(self._get_payout_total(pt.positions)) start_value = portfolio.portfolio_value # update the new starting value portfolio.portfolio_value = end_value = portfolio.cash + position_value pnl = end_value - start_value if start_value != 0: returns = pnl / start_value else: returns = 0.0 portfolio.pnl += pnl portfolio.returns = ( (1 + portfolio.returns) * (1 + returns) - 1 ) # the portfolio has been fully synced self._dirty_portfolio = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_portfolio(self, portfolio: PortfolioController):\n now = portfolio.get_history(seconds_back=0)\n future = portfolio.get_history(seconds_back=-self.update_interval)\n\n for fund in portfolio.funds:\n best_currency = max(portfolio.currencies, key=lambda currency: future_val...
[ "0.60446954", "0.5951977", "0.5695806", "0.5687665", "0.56560236", "0.5653465", "0.56520754", "0.5633276", "0.559409", "0.5562694", "0.54696906", "0.54538536", "0.5440695", "0.5416885", "0.5380987", "0.5366457", "0.5364525", "0.5331849", "0.5323494", "0.53195906", "0.53195906...
0.66357535
0
Compute the current portfolio. Notes This is cached, repeated access will not recompute the portfolio until the portfolio may have changed.
def portfolio(self): self.update_portfolio() return self._immutable_portfolio
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_portfolio(self):\n if not self._dirty_portfolio:\n return\n\n portfolio = self._portfolio\n pt = self.position_tracker\n\n portfolio.positions = pt.get_positions()\n position_stats = pt.stats\n\n portfolio.positions_value = position_value = (\n ...
[ "0.7362575", "0.6996456", "0.6729907", "0.6721263", "0.6675463", "0.66379255", "0.6499", "0.63661253", "0.63091534", "0.62968546", "0.6240645", "0.6196709", "0.6144556", "0.60243994", "0.5963545", "0.5962163", "0.59054154", "0.5858664", "0.5812279", "0.5781019", "0.5715677", ...
0.81566226
0
Override fields on ``self.account``.
def override_account_fields(self, settled_cash=not_overridden, accrued_interest=not_overridden, buying_power=not_overridden, equity_with_loan=not_overridden, total_positions_value=not_overridden, total_positions_exposure=not_overridden, regt_equity=not_overridden, regt_margin=not_overridden, initial_margin_requirement=not_overridden, maintenance_margin_requirement=not_overridden, available_funds=not_overridden, excess_liquidity=not_overridden, cushion=not_overridden, day_trades_remaining=not_overridden, leverage=not_overridden, net_leverage=not_overridden, net_liquidation=not_overridden): # mark that the portfolio is dirty to override the fields again self._dirty_account = True self._account_overrides = kwargs = { k: v for k, v in locals().items() if v is not not_overridden } del kwargs['self']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def patch(self, account=None, user=None, ac...
[ "0.6413226", "0.6413226", "0.6413226", "0.6413226", "0.6343206", "0.6329128", "0.6322581", "0.63135093", "0.61441493", "0.60770935", "0.60361296", "0.6022793", "0.60135746", "0.60068023", "0.59981", "0.59467715", "0.58997744", "0.5875439", "0.5858766", "0.5787134", "0.5765871...
0.67399603
0
Initializing method. Always starts with player 'X' going first. Also creates a blank board to begin playing on.
def __init__(self): self.current = Piece.EX self.board = [Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n # Current player\n self.player = X\n\n # Board\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]\n\n # Winner\n self.winner = None\n\n # Game over\n self._gameov...
[ "0.76133895", "0.74191046", "0.7272769", "0.7215476", "0.70663446", "0.7060645", "0.7049015", "0.7029267", "0.69543284", "0.69497305", "0.692294", "0.6910041", "0.6903089", "0.6872871", "0.6863269", "0.68311656", "0.68238926", "0.68191206", "0.6769023", "0.67465365", "0.67353...
0.6897718
13
Switches whose turn it is.
def switchPlayer(self): if (self.current is Piece.EX): self.current = Piece.OH else: self.current = Piece.EX
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _switch_turn(self, cur_player):\n if cur_player == \"W\":\n self._turn = \"B\"\n else:\n self._turn = \"W\"", "def turn(self):\n pass", "def get_switches(self) -> tuple:\n return self.switches", "def changeTurn(self):\n\t\tif self.turn == 1:\n\t\t\tself.t...
[ "0.68772453", "0.6781878", "0.6612293", "0.65573156", "0.6287292", "0.62218165", "0.6109247", "0.6078702", "0.6077556", "0.5993894", "0.5986418", "0.59811705", "0.5940087", "0.59389865", "0.59333205", "0.59090734", "0.5887211", "0.58654636", "0.5862874", "0.58553296", "0.5841...
0.5775022
23
Trys to make a move. If the move is successful returns 1. If the move string is not able to interpreted correctly or if that place is already full the move fails and the function returns 0
def makeMove(self, move): try: if (self.board[int(move) - 1] is Piece.BLANK): self.board[int(move) - 1] = self.current return 1 else: return 0 except: return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_valid(move):\n return True", "def handle_move(self, move_string):\n def map_move(move):\n col = int(ascii_lowercase.find(move[0])) + 1 # dummy col\n row = int(move[1:])\n # if not 0 < col <= game[\"board_width\"]:\n # raise ValueError('bad coord;...
[ "0.70150155", "0.676564", "0.6584836", "0.6568262", "0.6568117", "0.6563307", "0.6558569", "0.64923847", "0.64853036", "0.6477489", "0.645601", "0.6451311", "0.6451188", "0.64166987", "0.6414095", "0.63909614", "0.63762623", "0.636292", "0.6332474", "0.63271296", "0.63165474"...
0.6508602
7
Returns the winning peice if the game is over. If the game is a draw it returns the empty peice, and if the game is not over returns false.
def isOver(self): isFull = Piece.BLANK for a,b,c in [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]: if (self.board[a] is self.board[b] is self.board[c] and self.board[a] is not Piece.BLANK): return self.board[a] if (self.board[a] is Piece.BLANK or self.board[b] is Piece.BLANK or self.board[c] is Piece.BLANK): isFull = False return isFull
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gameOver():\n if len(p1)==0 and len(p1winnings)==0:\n return True\n elif len(p2)==0 and len(p2winnings)==0:\n return True\n return False", "def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False", "def is_game_over(sel...
[ "0.75529164", "0.74890757", "0.73833823", "0.73335296", "0.7333373", "0.72050595", "0.70841414", "0.705282", "0.7051845", "0.70401967", "0.6998241", "0.69875365", "0.6977598", "0.6947065", "0.69441944", "0.6942049", "0.69409615", "0.69100434", "0.6907182", "0.6895013", "0.689...
0.691553
17
Returns a string interpretation of the board.
def __str__(self): boardString = "\n{0}|{1}|{2}\n-----\n{3}|{4}|{5}\n-----\n{6}|{7}|{8}\n" return boardString.format(self.board[0], self.board[1], self.board[2], self.board[3], self.board[4], self.board[5], self.board[6], self.board[7], self.board[8])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def board_string(self):\n s = \"\"\n for i, v in enumerate(self.board):\n # if i % 81 == 0:\n # s += \"\\n\"\n if v is None:\n s += \"0\"\n else:\n if v.color == StoneColor.black:\n s += \"1\"\n ...
[ "0.84243935", "0.8055579", "0.8017218", "0.78294134", "0.7797174", "0.77931315", "0.7776164", "0.77571356", "0.77145636", "0.7699537", "0.7682327", "0.7632191", "0.76316303", "0.76192135", "0.7602081", "0.7599799", "0.758553", "0.7570357", "0.75595516", "0.7550093", "0.753793...
0.79764
3
The main method for running the game.
def main(): print("Welcome to TicTacToe") board = Board() while (not board.isOver()): print("It is {0}'s turn".format(board.current) + board.__str__()) move = input('Where would you like to go? : ').strip() if (move == 'q'): break elif (board.makeMove(move) == 1): board.switchPlayer() else: print("I didn't understand your input, these are the valid inputs:\nentering 'q' will quit out of the game.\n") print("entering a number will place the peice in that box, the numbers are as follows:\n \n1|2|3\n-----\n4|5|6\n-----\n7|8|9\n") print(board.__str__() + "\nGame Over") if (board.isOver() is Piece.EX or board.isOver() is Piece.OH): print("Player {0} wins!".format(board.isOver())) else: print("It was a draw")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n g = Game(800, 600)\n g.start()", "def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.main_loop()", "def main():\n game = RiichiMahjongApp()\n game.run()", "def main():\n g = DemoGame(800, 600)\n g.start()", "def main():\n game = Game(TIMES, HARDNESS)\n ...
[ "0.85276115", "0.8359565", "0.8280766", "0.8256087", "0.8189385", "0.79771304", "0.78113306", "0.7789521", "0.7754325", "0.77015805", "0.7665701", "0.7611436", "0.76085716", "0.7576111", "0.7546943", "0.7546619", "0.7520529", "0.7456335", "0.74490446", "0.74308777", "0.738893...
0.0
-1
Remove temporary partition files from disk. The removed files' names are deleted from the _temporary_files set. The intended use is to delete individual files as part of the garbage collection process and to delete all files when python exits. This is quite brutal and may break partitions if used unwisely. It is not recommended to be used as a general tidyup function.
def _remove_temporary_files(filename=None): if filename is not None: if filename in _temporary_files: # If this condition is not met then probably # _remove_temporary_files() has already been run at # exit dirname, _lock_file, _other_lock_files = _temporary_files[filename] try: remove(_lock_file) except OSError: pass # Only remove the temporary file if it is not being # used by any other ranks if not _lock_files_present(_other_lock_files): # Remove the given temporary file try: remove(filename) rmdir(dirname) except OSError: pass del _temporary_files[filename] # --- End: if return # Still here? Then remove all temporary files and lock files for filename in _temporary_files: try: remove(filename) except OSError: pass dirname, _lock_file, _other_lock_files = _temporary_files[filename] try: remove(_lock_file) except OSError: pass for lock_file in _other_lock_files: try: remove(lock_file) except OSError: pass # --- End: for try: rmdir(dirname) except OSError: pass # --- End: for _temporary_files.clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_temporary_files():\n try:\n xml_file_path, bin_file_path = get_ida_exported_files()\n if os.path.isfile(xml_file_path):\n os.remove(xml_file_path)\n\n if os.path.isfile(bin_file_path):\n os.remove(bin_file_path)\n\n except Exception:\n print(\"GhID...
[ "0.7633441", "0.74177027", "0.72147197", "0.7178067", "0.6853436", "0.6829257", "0.67934287", "0.6738931", "0.67164594", "0.66771114", "0.66554505", "0.66493994", "0.6645599", "0.66091245", "0.65514976", "0.6517317", "0.6492228", "0.64918196", "0.6469804", "0.6463625", "0.638...
0.7262391
2
Used if copy.deepcopy is called on the variable.
def __deepcopy__(self, memo): return self.copy()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def varcopy(self, vars):", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):", "def shallow_copy(self):\n # TODO: Rename this to __copy__()?\n raise NotImplementedError(\"shallow_copy is not implemented\")", "def __copy__(s...
[ "0.6630489", "0.63350475", "0.63350475", "0.63350475", "0.6280567", "0.6222454", "0.6153182", "0.6143717", "0.61114347", "0.6110205", "0.6047252", "0.5978211", "0.5953629", "0.59274524", "0.59274524", "0.59274524", "0.58838916", "0.5878963", "0.5824744", "0.5819304", "0.57938...
0.60275984
11
Called when the partition's reference count reaches zero. If the partition contains a temporary file which is not referenced by any other partition then the temporary file is removed from disk. If the partition contains a nontemporary file which is not referenced by any other partition then the file is closed.
def __del__(self): # subarray = getattr(self, '_subarray', None) subarray = self._subarray # If the subarray is unique it will have 2 references to # it plus 1 within this method, making 3. If it has more # than 3 references to it then it is not unique. if getrefcount is not None: self._decrement_file_counter() if subarray is None or getrefcount(subarray) > 3: return else: # getrefcount has itself been deleted or is in the process # of being torn down return _partition_file = getattr(subarray, "_partition_file", None) if _partition_file is not None: # This partition contains a temporary file which is not # referenced by any other partition on this process, so if # there are no lock files present remove the file from # disk. _remove_temporary_files(_partition_file) else: try: if FileArray is not None and isinstance(subarray, FileArray): try: filename = subarray.get_filename() except Exception: filename = None if self.file_counter.get(filename, 999) <= 0: # This partition contains a non-temporary file # which is not referenced by any other # partitions, so close the file. subarray.close() except Exception: # If we're here then it is likely that FileArray has been # torn down, so just do nothing. pass # --- End: if
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))", "def _Close(self):\n self._fsfat_volume = None\n self._file_object = None", "...
[ "0.637165", "0.6141179", "0.6079966", "0.60165036", "0.5893113", "0.5771018", "0.5769767", "0.5752214", "0.5731687", "0.5730452", "0.57243013", "0.5715975", "0.5715163", "0.5711628", "0.5696097", "0.56944114", "0.56878215", "0.56815344", "0.5681272", "0.5633728", "0.5630855",...
0.61570215
1
Add i to the count of subarrays referencing the file of this partition's subarray. Only do this if self._subarray is an instance of FileArray, but not a temporary FileArray.
def _add_to_file_counter(self, i): # subarray = getattr(self, '_subarray', None) subarray = self._subarray if subarray is None: return try: if isinstance(subarray, FileArray) and not isinstance( subarray, CachedArray ): try: filename = subarray.get_filename() except Exception: filename = None if filename is None: return file_counter = self.file_counter # count = file_counter.get(filename, 0) # file_counter[filename] = count + i # if file_counter[filename] <= 0: count = file_counter.get(filename, 0) + i if count <= 0: # Remove the file from the dictionary if its count has # dropped to zero file_counter.pop(filename, None) else: file_counter[filename] = count except Exception: # If we're here then it is likely that FileArray has been # torn down, so just do nothing. pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n", "def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i...
[ "0.5323221", "0.5309046", "0.5237938", "0.52028364", "0.51664454", "0.5117871", "0.51150346", "0.5107957", "0.50491905", "0.50423014", "0.5001557", "0.499577", "0.49840355", "0.4932267", "0.4909338", "0.48659304", "0.48641643", "0.48607743", "0.48591715", "0.4854085", "0.4824...
0.8270168
0
Add 1 to the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _increment_file_counter(self): self._add_to_file_counter(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ...
[ "0.83802605", "0.6079216", "0.60073787", "0.5972287", "0.5871502", "0.58649766", "0.58356875", "0.5813897", "0.5656929", "0.5650024", "0.5628978", "0.5588129", "0.5575472", "0.55536973", "0.55466735", "0.55383646", "0.5529106", "0.5502226", "0.54975855", "0.5481056", "0.54359...
0.71174276
1
Subtract 1 from the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _decrement_file_counter(self): self._add_to_file_counter(-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ...
[ "0.7747842", "0.63648546", "0.6072333", "0.60351396", "0.5918244", "0.57059807", "0.56674904", "0.5664642", "0.56031275", "0.5573989", "0.5520654", "0.5453089", "0.5448838", "0.54281026", "0.5422772", "0.5378375", "0.5307265", "0.5244931", "0.52217174", "0.5220032", "0.521629...
0.6648334
1
Add the auxiliary mask to the config dictionary. Assumes that ``self.config`` already exists.
def _configure_auxiliary_mask(self, auxiliary_mask): indices = self.indices new = [ mask[ tuple( [ (slice(None) if n == 1 else index) for n, index in zip(mask.shape, indices) ] ) ] for mask in auxiliary_mask ] # # If the partition is to be parallelised then get rid of mask # # components which are all False so the mask component does # # not get copied to the child process # if not config['serial']: # new = [mask for mask in new if not mask.any()] self.config["auxiliary_mask"] = new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_masking(self, masks):\n self.masks = masks", "def add_config(self):\n\n config = {\n 'invert_byte': InvertByte,\n 'invert_word': InvertWord,\n 'invert_double_word': InvertDoubleWord,\n 'and_byte': AndByte,\n 'and_word': AndWord,\n...
[ "0.5756575", "0.5562516", "0.54862016", "0.5367435", "0.5347834", "0.53455114", "0.5306387", "0.52775955", "0.522134", "0.5194576", "0.51778084", "0.5152401", "0.51056355", "0.51011837", "0.50790006", "0.5067728", "0.5065551", "0.4962702", "0.4946801", "0.4946075", "0.4918300...
0.71955097
0
The indices of the master array which correspond to this partition's data array.
def indices(self): return tuple([slice(*r) for r in self.location])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indices(self) -> np.ndarray:\n return self.impl.indices", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom i...
[ "0.70275134", "0.6850407", "0.683511", "0.6796057", "0.6708753", "0.6668921", "0.6662458", "0.66586286", "0.6644368", "0.6611067", "0.66031367", "0.65675586", "0.65319914", "0.64623576", "0.6393266", "0.6372617", "0.6343201", "0.63220435", "0.6305004", "0.62970823", "0.628111...
0.62476814
23
True if and only if the partition's subarray is in memory as opposed to on disk.
def in_memory(self): return hasattr(self._subarray, "__array_interface__")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n ...
[ "0.7677807", "0.75874203", "0.73676527", "0.6567502", "0.6373806", "0.62520576", "0.6168975", "0.6139325", "0.61339194", "0.6126576", "0.611511", "0.608777", "0.6025861", "0.6025105", "0.60162103", "0.5953649", "0.59421575", "0.59196216", "0.5899218", "0.5882104", "0.5880964"...
0.7635218
1
True if and only if the partition's subarray is on disk in a temporary file.
def in_cached_file(self): return isinstance(self._subarray, CachedArray)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def _are_features_already_extracted(self, output_path: str, subset: str) -> bool:\n file_path = join(output_path, subset + '.npy')\n return os.path.exists(file_path)", "def has_subfile(self) -> bool:\n\t\tself._update_sub...
[ "0.72301924", "0.65045905", "0.64364123", "0.5942543", "0.5936233", "0.591641", "0.5817689", "0.5676702", "0.5673538", "0.5658594", "0.5589181", "0.5555981", "0.5479751", "0.5440639", "0.5393117", "0.53697217", "0.5364029", "0.5358785", "0.5355252", "0.53492653", "0.53334475"...
0.58127344
7
True if and only if the partition's subarray is on disk as opposed to in memory.
def on_disk(self): return isinstance(self._subarray, FileArray)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # ------------------------------------------...
[ "0.69458973", "0.67629313", "0.6696292", "0.656141", "0.63517463", "0.6290223", "0.61257756", "0.59961635", "0.59486187", "0.59276515", "0.59175736", "0.5881189", "0.5803704", "0.577553", "0.5774722", "0.5755425", "0.5737828", "0.57311875", "0.5723483", "0.5720749", "0.568287...
0.80375713
0
True if and only if the partition's subarray is on disk as opposed to in memory.
def in_file(self): return self.on_disk and not self.in_cached_file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if...
[ "0.80375713", "0.69458973", "0.67629313", "0.6696292", "0.656141", "0.63517463", "0.6290223", "0.61257756", "0.59961635", "0.59486187", "0.59276515", "0.59175736", "0.5881189", "0.577553", "0.5774722", "0.5755425", "0.5737828", "0.57311875", "0.5723483", "0.5720749", "0.56828...
0.5803704
13
The data type of the master array.
def dtype(self): return self.config["dtype"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def datatype_name(self):\n return 'array'", "def data_type(self):\r\n return self._data_type", "def data_type(self):\n return self._data_type", "def data_type(self):\n return self._data_type", "def data_type(self):\n return self._data_type", "def dtype(self):\n r...
[ "0.7694177", "0.76688254", "0.75949335", "0.75949335", "0.75949335", "0.7573461", "0.74812984", "0.7376351", "0.7376351", "0.72733635", "0.7250659", "0.71817577", "0.7178896", "0.71689445", "0.7107126", "0.7105419", "0.70681125", "0.7060085", "0.7039309", "0.7015803", "0.7010...
0.69170535
30
True if and only if the partition's data array is a scalar array.
def isscalar(self): return not self.axes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_scalar(obj: _std_typing.Any) -> bool:\n return obj.ndim == 0", "def is_scalar(x: Any) -> bool:\r\n return np.isscalar(x) or (isinstance(x, np.ndarray) and x.ndim == 0)", "def is_array(self):\n return False", "def is_scalar(self):\n return len(self.coeffs.shape[self.sdim:]) == 0", ...
[ "0.74611837", "0.74560386", "0.7210494", "0.71899956", "0.7152722", "0.7101354", "0.70952576", "0.7088053", "0.7086914", "0.69344157", "0.6923261", "0.69007254", "0.68844897", "0.68752134", "0.67174804", "0.66352606", "0.66348445", "0.65968424", "0.658544", "0.65799415", "0.6...
0.0
-1
The size in bytes of the subarray. The size takes into account the datatype and assumes that there is a boolean mask, unless it can be ascertained that there isn't one.
def nbytes(self): dtype = self.config["dtype"] if dtype is None: return None size = reduce(mul, self.shape, 1) nbytes = size * dtype.itemsize if getattr(self, "masked", True): nbytes += size return nbytes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def arraySize( cls, value, typeCode = None ):\n return value.size", "def array_size(self):\n return self._array_size", "def container_size(self):\n import cPickle\n import sys\n t = cPickle.dumps(self.filter_bitarray)\n return sys.getsizeof(t)", "def size(self):\...
[ "0.718968", "0.71867967", "0.7167967", "0.70721114", "0.69927114", "0.69399047", "0.6921515", "0.68736595", "0.6852514", "0.6835874", "0.6802148", "0.67929095", "0.67913187", "0.67521507", "0.6731404", "0.6717999", "0.67143357", "0.6703314", "0.67020184", "0.6700556", "0.6697...
0.6896727
7
Number of array dimensions.
def ndim(self): return len(self.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_dims(self):\n return len(self.dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def count_dims(da):\n ret...
[ "0.87556416", "0.85560286", "0.85560286", "0.84606373", "0.8365305", "0.83450353", "0.8314268", "0.8273276", "0.82264763", "0.8216497", "0.81538165", "0.81395245", "0.8122816", "0.81203264", "0.809687", "0.8096407", "0.80840564", "0.80840564", "0.80781686", "0.80665874", "0.8...
0.79104465
30
Number of elements in the partition's data array (not its subarray).
def size(self): return reduce(mul, self.shape, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self):\r\n return self.data_array.size", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def n_elements(self) -> int:\n n_elem = np.prod(self.shape)\n ...
[ "0.76638305", "0.73945427", "0.7326349", "0.7308581", "0.7271908", "0.72121114", "0.71300155", "0.7121346", "0.71050775", "0.7088031", "0.70531565", "0.6979757", "0.69550794", "0.694825", "0.69397396", "0.69286805", "0.6899676", "0.6895468", "0.68825126", "0.687968", "0.68745...
0.0
-1
The partition's subarray of data.
def subarray(self): return self._subarray
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subarray(self) -> Subarray:\n return Subarray.from_pybind11(self._ctx, self._subarray)", "def partition(self, sep):\n return asarray(partition(self, sep))", "def array(self) -> ndarray:\n if self._slices: # so this is a sub-parray object\n # index into origin array by saved...
[ "0.68206084", "0.6535615", "0.6457086", "0.6347438", "0.62131214", "0.61877143", "0.61062545", "0.6101903", "0.60762966", "0.6068932", "0.6029079", "0.6012183", "0.59961045", "0.59653914", "0.5928066", "0.58841175", "0.58761495", "0.58514863", "0.5848823", "0.5791028", "0.577...
0.7297865
0
Change the axis names. The axis names are arbitrary, so mapping them to another arbitrary collection does not change the data array values, units, nor axis order.
def change_axis_names(self, axis_map): axes = self.axes # Partition axes self.axes = [axis_map[axis] for axis in axes] # Flipped axes flip = self.flip if flip: self.flip = [axis_map[axis] for axis in flip]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setAxisName(name, axes='XYZ'):\n dislin.name(name, axes)", "def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n ...
[ "0.7356323", "0.7326029", "0.6893155", "0.6677015", "0.64332557", "0.6372005", "0.63517636", "0.62905", "0.628417", "0.62775946", "0.6271645", "0.6077482", "0.6047013", "0.6024936", "0.5993588", "0.59712166", "0.5967535", "0.596201", "0.5952742", "0.58923167", "0.589169", "...
0.75589085
0
Close the partition after it has been conformed. The partition should usually be closed after its `array` method has been called to prevent memory leaks. Closing the partition does one of the following, depending on the values of the partition's `!_original` attribute and on the
def close(self, **kwargs): config = getattr(self, "config", None) if config is None: return if kwargs: config.update(kwargs) original = getattr(self, "_original", None) logger.partitioning("Partition.close: original = {}".format(original)) if not original: originally_on_disk = False original_subarray = None else: originally_on_disk = not original.in_memory original_subarray = original._subarray config = self.config logger.partitioning(" config = {}".format(config)) if config["serial"]: # -------------------------------------------------------- # SERIAL # -------------------------------------------------------- logger.partitioning(" serial") if config["readonly"]: logger.partitioning(" readonly=True") if originally_on_disk: logger.partitioning(" subarray originally on disk") if config.get("to_disk", False): # 1.1.1.1 The original subarray was on disk, # we don't want to keep the current # subarray in memory, and we are happy # to discard any changes that may have # been made to the subarray. logger.partitioning(" 1.1.1.1 revert") self.revert() elif free_memory() <= cf_fm_threshold(): # 1.1.1.2 The original subarray was on disk, # we are happy to keep the current # subarray in memory, but there is not # enough free memory to do so. 
logger.partitioning( " 1.1.1.2 revert ({} <= {})".format( free_memory(), cf_fm_threshold() ) ) self.revert() else: # 1.1.1.3 The original subarray was on disk # and there is enough memory to keep # the current subarray in memory if config["unique_subarray"] and isinstance( original_subarray, CachedArray ): # The original subarray was a temporary # file which is not referenced by any # other partitions _remove_temporary_files( original_subarray._partition_file ) del self.masked logger.partitioning( " 1.1.1.3 del masked ({} > {})".format( free_memory(), cf_fm_threshold() ) ) else: logger.partitioning(" subarray originally in memory") if config.get("to_disk", False): # 1.1.2.1 Original subarray was in memory and # we don't want to keep the current # subarray in memory logger.partitioning(" 1.1.2.1 to_disk") self.to_disk(reopen=False) elif free_memory() <= cf_fm_threshold(): # 1.1.2.2 Original subarray was in memory and # unique but there is not enough # memory to keep the current subarray logger.partitioning(" 1.1.2.2 to_disk") self.to_disk(reopen=False) else: # 1.1.2.3 Original subarray was in memory and # unique and there is enough memory to # keep the current subarray in memory logger.partitioning(" 1.1.2.3 pass") pass else: # config['readonly'] is False if originally_on_disk: if config.get("to_disk", False): # 1.2.1.1 Original subarray was on disk and # there and we don't want to keep the # array if config["unique_subarray"] and isinstance( original_subarray, CachedArray ): # Original subarray was a temporary file # on disk which is not referenced by any # other partitions _remove_temporary_files( original_subarray._partition_file ) logger.partitioning(" 1.2.1.1 to_disk") self.to_disk(reopen=False) elif free_memory() <= cf_fm_threshold(): # 1.2.1.2 Original subarray was on disk but # there is not enough memory to keep # it if config["unique_subarray"] and isinstance( original_subarray, CachedArray ): # Original subarray was a temporary file # on disk which is not 
referenced by any # other partitions _remove_temporary_files( original_subarray._partition_file ) logger.partitioning(" 1.2.1.2 to_disk") self.to_disk(reopen=False) else: # 1.2.1.3 Original subarray was on disk and # there is enough memory to keep it logger.partitioning(" 1.2.1.3 pass") del self.masked else: if config.get("to_disk", False): # 1.2.2.1 Original subarray was in memory but # we don't want to keep it logger.partitioning(" 1.2.2.1 to_disk") self.to_disk(reopen=False) elif free_memory() <= cf_fm_threshold(): # 1.2.2.2 Original subarray was an in memory # but there is not enough memory to # keep it logger.partitioning(" 1.2.2.2 to_disk") self.to_disk(reopen=False) else: # 1.2.2.3 Original subarray was in memory and # there is enough memory to keep it logger.partitioning(" 1.2.2.3 del masked") del self.masked else: logger.partitioning("Partition.close: parallel") # -------------------------------------------------------- # PARALLEL # -------------------------------------------------------- pass # if hasattr(self, '_original'): # del self._original # print(hasattr(self, 'config')), try: del self.config except AttributeError: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n return self.close_array", "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def close(self):\n self.ix.close()", "def close (self):\n pass\n #TODO: implement more realistic closing semantics", "def close(self):\n self.data....
[ "0.6764165", "0.6518303", "0.60208786", "0.5811839", "0.5795013", "0.579095", "0.5784808", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.5713722", "0.5699953", "0.5699953", "0.5665161", "0.5664806", "0.56545...
0.70555735
0
Return a deep copy. ``p.copy()`` is equivalent to ``copy.deepcopy(p)``.
def copy(self): new = Partition.__new__(Partition) new.__dict__ = self.__dict__.copy() self._increment_file_counter() return new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deepcopy(self):\n return copymod.deepcopy(self)", "def copy(self):\n\t\treturn pythoncopy.deepcopy(self)", "def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)", "def deepcopy(self):\n return self.copy()", "def copy(self):\r\n return copy.deepcopy(self)", "...
[ "0.7977393", "0.7835692", "0.7835209", "0.7829921", "0.7808746", "0.7806165", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0...
0.0
-1
Returns the partition's data array. After a partition has been conformed, the partition must be closed (with the `close` method) before another partition is conformed,
def array(self):
    """Return the partition's data as a numpy array, conformed to the
    configuration stored on `self.config` (as set by `open`).

    The returned array has the configured units, axis order, axis
    directions, shape and dtype, with any auxiliary mask applied.

    NOTE(review): after a partition has been conformed, the partition
    apparently must be closed (with the `close` method) before another
    partition is conformed — `self.config` and `self._original` look
    like per-conform shared state; confirm against `close`.

    :Returns:

        `numpy.ndarray`
            The partition's conformed data.

    """
    config = self.config
    unique_array = config["unique_subarray"]

    # Snapshot the partition's current metadata before conforming
    p_axes = self.axes
    p_flip = self.flip
    p_part = self.part
    p_units = self.Units
    p_shape = self.shape
    p_location = self.location
    subarray = self._subarray

    len_p_axes = len(p_axes)

    if not self.in_memory:
        # --------------------------------------------------------
        # The subarray is not in memory.
        #
        # It could be in a file on disk or implied by a FileArray
        # object, etc.
        # --------------------------------------------------------
        # Keep a copy of the unconformed partition so that close()
        # can restore it later
        self._original = self.copy()

        unique_array = True
        update = True
        copy = False

        if not p_part:
            indices = Ellipsis
        else:
            indices = tuple(p_part)

        # Read from a file into a numpy array
        p_data = subarray[indices]

        # We've just copied p_data from disk, so in place changes
        # are not possible
        in_place_changes = False
    else:
        # --------------------------------------------------------
        # The subarray is already in memory
        # --------------------------------------------------------
        update = config["update"]

        if p_part:
            p_data = get_subspace(subarray, p_part)
        elif not unique_array:
            # Another partition references this subarray, so work
            # on a view rather than the array itself
            p_data = subarray.view()
        else:
            p_data = subarray

        copy = config["extra_memory"]

        # In place changes to p_data might be possible if we're not
        # copying the data
        in_place_changes = not copy

    if not p_data.ndim and isinstance(p_data, (numpy_number, numpy_bool_)):
        # --------------------------------------------------------
        # p_data is a numpy number (like numpy.int64) which does
        # not support assignment, so convert it to a numpy array.
        # --------------------------------------------------------
        p_data = numpy_array(p_data)
        # We've just copied p_data, so in place changes are
        # not possible
        copy = False
        in_place_changes = False

    masked = numpy_ma_isMA(p_data)
    if masked:
        # The p_data is a masked array
        if p_data.mask is numpy_ma_nomask or not numpy_ma_is_masked(
            p_data
        ):
            # There are no missing data points so recast as an
            # unmasked numpy array
            p_data = p_data.data
            masked = False
    # --- End: if

    if masked:
        # Set the hardness of the mask
        if config["hardmask"]:
            p_data.harden_mask()
        else:
            p_data.soften_mask()
    # --- End: if

    self.masked = masked

    # ------------------------------------------------------------
    # Make sure that the data array has the correct units. This
    # process will deep copy the data array if required (e.g. if
    # another partition is referencing this numpy array), even if
    # the units are already correct.
    # ------------------------------------------------------------
    func = config.get("func")
    units = config["units"]
    if func is None:
        # Only conform units when both are set or both are unset
        if not p_units.equals(units) and bool(p_units) is bool(units):
            func = Units.conform

    if func is not None:
        inplace = not copy
        p_data = func(p_data, p_units, units, inplace)
        p_units = units

        if not inplace:
            # We've just copied p_data, so in place changes are
            # not possible
            copy = False
            in_place_changes = False
    # --- End: if

    flip = config.get("flip", None)
    if flip or p_flip:
        # Axes in exactly one of the two flip lists run the wrong way
        flip_axes = set(p_flip).symmetric_difference(flip)
    else:
        flip_axes = None

    axes = config["axes"]

    if p_data.size > 1:
        # --------------------------------------------------------
        # Flip axes
        # --------------------------------------------------------
        if flip_axes:
            indices = [
                (
                    slice(None, None, -1)
                    if axis in flip_axes
                    else slice(None)
                )
                for axis in p_axes
            ]
            p_data = p_data[tuple(indices)]

        # --------------------------------------------------------
        # Transpose axes into the configured order
        # --------------------------------------------------------
        if p_axes != axes:
            iaxes = [p_axes.index(axis) for axis in axes if axis in p_axes]

            if len_p_axes > len(iaxes):
                # Re-insert positions of axes not named in `axes`
                for i in range(len_p_axes):
                    if i not in iaxes:
                        # iaxes.append(i)
                        iaxes.insert(i, i)
            # --- End: if

            p_data = numpy_transpose(p_data, iaxes)
    # --- End: if

    # ------------------------------------------------------------
    # Remove excessive/insert missing size 1 axes
    # ------------------------------------------------------------
    if p_shape != p_data.shape:
        # if len_p_axes != len(p_shape):
        p_data = p_data.reshape(p_shape)

    # ------------------------------------------------------------
    # Apply the auxiliary mask
    # ------------------------------------------------------------
    auxiliary_mask = config["auxiliary_mask"]
    if auxiliary_mask:
        for mask in auxiliary_mask:
            if mask.any():
                if not masked:
                    p_data = p_data.view(numpy_ma_MaskedArray)
                    masked = True

                p_data.mask = (mask | p_data.mask).array
        # --- End: for

        self.masked = True
    # --- End: if

    # ------------------------------------------------------------
    # Convert the array's data type
    # ------------------------------------------------------------
    p_dtype = p_data.dtype
    dtype = config.get("dtype", None)
    if dtype is not None and dtype != p_dtype:
        try:
            p_data = p_data.astype(dtype)  # Note: returns a copy
        except ValueError:
            raise ValueError(
                "Can't recast partition array from {} to {}".format(
                    p_dtype.name, dtype.name
                )
            )
        else:
            # We've just copied p_data, so in place changes are
            # not possible
            copy = False
            in_place_changes = False
    # --- End: if

    # ------------------------------------------------------------
    # Copy the array
    # ------------------------------------------------------------
    if copy:
        if p_dtype.char != "O":
            if not masked or p_data.ndim > 0:
                p_data = p_data.copy()
            else:
                # This is because numpy.ma.copy doesn't work for
                # scalar arrays (at the moment, at least)
                p_data = numpy_ma_masked_all((), p_data.dtype)

            # We've just copied p_data, so in place changes are
            # not possible
            in_place_changes = False
        else:
            # whilst netCDF4.netcdftime.datetime is mucking bout,
            # don't copy!!!!
            # p_data = _copy(p_data)
            pass
    # --- End: if

    # ------------------------------------------------------------
    # Update the partition's metadata to match the conformed array
    # ------------------------------------------------------------
    if update:
        self.subarray = p_data  # ?? DCH CHECK
        self.Units = p_units
        self.part = []
        self.axes = axes
        self.flip = flip
        self.flatten = []
        self.shape = p_shape
        self.location = p_location

        self._in_place_changes = in_place_changes

    # ------------------------------------------------------------
    # Return the numpy array
    # ------------------------------------------------------------
    return p_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data ( self ):\n return self._data_pntr.ReadAsArray()", "def get_data ( self ):\n return self._data_pntr.ReadAsArray()", "def data_array(self):\n return self._data_array", "def getData(self):\n return self._array", "def get(self):\r\n return self.data_array", "d...
[ "0.66712004", "0.66712004", "0.6514655", "0.62883234", "0.62340677", "0.6196425", "0.6086166", "0.6076796", "0.5936824", "0.59225637", "0.5890117", "0.5851978", "0.5830991", "0.5823379", "0.5809332", "0.57856905", "0.5781631", "0.57773644", "0.57653815", "0.5727279", "0.57197...
0.5538118
31
True if the subarray contains datetime objects.
def isdt(self):
    """True if the subarray contains date-time objects.

    The subarray is considered to hold date-times when its units are
    reference-time units and its dtype is the object dtype.

    :Returns:

        `bool`

    """
    has_reftime_units = self.Units.isreftime
    return has_reftime_units and self._subarray.dtype == _dtype_object
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_datetime_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.datetime64", "def _uses_datetimeblock(dtype: Union[np.dtype, ExtensionDtype]) -> bool:\n vtype = dtype.type\n return issubclass(vtype, np.datetime64)", "def is_datetime(self) -> bool:\n return False", "def are...
[ "0.72152364", "0.68011504", "0.67946845", "0.6540459", "0.6300369", "0.62526584", "0.6159265", "0.612599", "0.612436", "0.60484034", "0.60277694", "0.6001856", "0.5956571", "0.5938945", "0.58849597", "0.58664906", "0.58624506", "0.5751764", "0.56482595", "0.56456316", "0.5640...
0.71551335
1
Close the file containing the subarray, if there is one.
def file_close(self):
    """Close the file containing the subarray, if there is one.

    Does nothing when the subarray is not on disk.

    :Returns:

        `None`

    """
    if not self.on_disk:
        return

    self._subarray.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_file(self):\n self.root_group.close()", "def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n ...
[ "0.61085176", "0.6024418", "0.59545076", "0.5824396", "0.58042437", "0.5736725", "0.57022864", "0.57022864", "0.56994104", "0.56970567", "0.56860274", "0.564539", "0.56289333", "0.56183493", "0.5591367", "0.5591367", "0.55517685", "0.5509945", "0.5487693", "0.54802656", "0.54...
0.7828047
0
Inspect the object for debugging.
def inspect(self):
    """Inspect the object for debugging.

    Prints the report produced by `cf_inspect`.

    :Returns:

        `None`

    """
    report = cf_inspect(self)
    print(report)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inspect(obj:Any) -> None:\n\t\tLogging._log(Logging.logLevel, obj)", "def debug(self):\n raise NotImplementedError", "def output_debug_info(self):", "def inspect_obj(self, line=None):\n if not line:\n return\n\n # evaluate the line to get a python object\n python_ob...
[ "0.7372906", "0.72778416", "0.6883075", "0.6746992", "0.6737449", "0.6555419", "0.6541405", "0.65274495", "0.6431801", "0.64068395", "0.6402232", "0.640157", "0.6398631", "0.63798773", "0.6376369", "0.6375551", "0.63642555", "0.63500595", "0.6315535", "0.6302645", "0.62807316...
0.6848801
3
Return an iterator over indices of the master array which are spanned by the data array.
def master_ndindex(self):
    """Return an iterator over indices of the master array which are
    spanned by the data array.

    Each element of `self.location` supplies ``range`` arguments for
    one axis; the Cartesian product of those ranges is returned.

    :Returns:

        `itertools.product`
            Iterator of index tuples.

    """
    axis_ranges = [range(*bounds) for bounds in self.location]
    return itertools_product(*axis_ranges)  # TODO check
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_subset_inds(self, adata_parent):\r\n subset_inds = np.ones(len(adata_parent), dtype=bool)\r\n for condition, values in self.subset_cond.items():\r\n subset_inds *= adata_parent.obs[condition].isin(values)\r\n return subset_inds", "def enumerate(self):\n # go through...
[ "0.61020994", "0.6072502", "0.60690475", "0.60687184", "0.60543483", "0.60275173", "0.5924552", "0.5918545", "0.59088135", "0.58821535", "0.58567846", "0.58429295", "0.5828838", "0.58083194", "0.5794418", "0.5793355", "0.57828283", "0.57748425", "0.5756645", "0.57550335", "0....
0.6835397
0
Update the `!part` attribute in place for new indices of the master array.
def new_part(self, indices, master_axis_to_position, master_flip):
    """Update the `!part` attribute in place for new indices of the
    master array.

    :Parameters:

        indices: `list`
            Indices of the master array, one per master axis.

        master_axis_to_position: `dict`
            Mapping of master axis identifier to its position in
            *indices*.

        master_flip: `list`
            Master axes which run in the flipped direction.

    :Returns:

        `None`

    """
    shape = self.shape

    # Nothing to do if the indices select the whole partition
    if indices == [slice(0, stop, 1) for stop in shape]:
        return

    # ------------------------------------------------------------
    # If a dimension runs in the wrong direction then change its
    # index to account for this.
    #
    # For example, if a dimension with the wrong direction has
    # size 10 and its index is slice(3,8,2) then after the
    # direction is set correctly, the index needs to changed to
    # slice(6,0,-2):
    #
    # >>> a = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    # >>> a[slice(3, 8, 2)]
    # [6, 4, 2]
    # >>> a.reverse()
    # >>> print(a)
    # >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    # >>> a[slice(6, 0, -2)]
    # [6, 4, 2]
    # ------------------------------------------------------------
    if self._subarray.size > 1:
        indices = indices[:]

        p_flip = self.flip

        for axis, i in master_axis_to_position.items():
            if (axis not in p_flip and axis not in master_flip) or (
                axis in p_flip and axis in master_flip
            ):
                # This axis runs in the correct direction
                continue

            # Still here? Then this axis runs in the wrong
            # direction.

            # Reset the direction
            p_flip = p_flip[:]
            if axis in self.flip:
                p_flip.remove(axis)
            else:
                p_flip.append(axis)

            # Modify the index to account for the changed
            # direction
            size = shape[i]

            if isinstance(indices[i], slice):
                start, stop, step = indices[i].indices(size)
                # Note that step is assumed to be always +ve here
                div, mod = divmod(stop - start - 1, step)
                start = size - 1 - start
                stop = start - div * step - 1
                if stop < 0:
                    stop = None

                indices[i] = slice(start, stop, -step)
            else:
                # List-of-integers index: mirror each position
                size -= 1
                indices[i] = [size - j for j in indices[i]]
        # --- End: for

        self.flip = p_flip
    # --- End: if

    slice_None = slice(None)

    # Reorder the new indices from master-axis order into this
    # partition's axis order; axes not indexed get a full slice
    indices = [
        (
            indices[master_axis_to_position[axis]]
            if axis in master_axis_to_position
            else slice_None
        )
        for axis in self.axes
    ]

    part = self.part

    if not part:
        # No existing part: the reordered indices become the part
        self.part = indices
        return

    # Still here? Then update an existing part.
    p_part = []
    for part_index, index, size in zip(
        part, indices, self._subarray.shape
    ):
        if index == slice_None:
            # Full slice leaves the existing part index unchanged
            p_part.append(part_index)
            continue

        if isinstance(part_index, slice):
            if isinstance(index, slice):
                # Compose slice-of-slice into a single slice
                start, stop, step = part_index.indices(size)

                size1, mod = divmod(stop - start - 1, step)

                start1, stop1, step1 = index.indices(size1 + 1)

                size2, mod = divmod(stop1 - start1, step1)

                if mod != 0:
                    size2 += 1

                start += start1 * step
                step *= step1
                stop = start + (size2 - 1) * step

                if step > 0:
                    stop += 1
                else:
                    stop -= 1
                if stop < 0:
                    stop = None

                p_part.append(slice(start, stop, step))

                continue
            else:
                # Integer-list index of a slice: expand the slice
                new_part = list(range(*part_index.indices(size)))
                new_part = [new_part[i] for i in index]
        else:
            if isinstance(index, slice):
                new_part = part_index[index]
            else:
                new_part = [part_index[i] for i in index]
        # --- End: if

        # Still here? Then the new element of p_part is a list of
        # integers, so let's see if we can convert it to a slice
        # before appending it.
        new_part0 = new_part[0]
        if len(new_part) == 1:
            # Convert a single element list to a slice object
            new_part = slice(new_part0, new_part0 + 1, 1)
        else:
            step = new_part[1] - new_part0
            if step:
                if step > 0:
                    start, stop = new_part0, new_part[-1] + 1
                else:
                    start, stop = new_part0, new_part[-1] - 1

                if new_part == list(range(start, stop, step)):
                    if stop < 0:
                        stop = None

                    new_part = slice(start, stop, step)
        # --- End: if

        p_part.append(new_part)
    # --- End: for

    self.part = p_part
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, patch):\n internalSlices = self._get_internal_slices(patch.slices)\n self.array[internalSlices] = patch.array", "def _idx_changed(self, idx):\n self.refresh_memory()", "def _loadpart(self, part):\n new_partidx = util.Partname(part.partname).idx\n for idx, seq_p...
[ "0.5455975", "0.5450583", "0.5443926", "0.53721666", "0.5359862", "0.5275372", "0.52543634", "0.52315736", "0.5228334", "0.51149035", "0.50902224", "0.50782204", "0.5062653", "0.50265366", "0.50074023", "0.49909484", "0.49905938", "0.49756426", "0.4970326", "0.496184", "0.496...
0.5784396
0
The extra memory required to access the array.
def extra_memory(self):
    """The extra memory, in bytes, required to access the array.

    Returns 0 when the partition's data can be accessed without
    copying; otherwise returns `self.nbytes`.

    :Returns:

        `int`
            The amount of extra memory in bytes.

    """
    if not self.in_memory:
        # --------------------------------------------------------
        # The subarray is on disk so getting the partition's data
        # array will require extra memory
        # --------------------------------------------------------
        extra_memory = True
    else:
        # --------------------------------------------------------
        # The subarray is already in memory
        # --------------------------------------------------------
        config = self.config

        p_part = self.part
        if p_part:
            # A sub-part must be extracted, which copies
            extra_memory = True
        elif not config["unique_subarray"]:
            # Another partition shares the subarray, so it must be
            # copied before modification
            extra_memory = True
        else:
            p_data = self._subarray

            if not numpy_ma_isMA(p_data):
                # The p_data is not a masked array; a numpy base
                # object means the data is a view of another array
                extra_memory = isinstance(p_data.base, numpy_ndarray)
            else:
                # The p_data is a masked array; check both the data
                # and (if present) the mask for shared memory
                memory_overlap = isinstance(
                    p_data.data.base, numpy_ndarray
                )
                if not (
                    p_data.mask is numpy_ma_nomask
                    or not numpy_ma_is_masked(p_data)
                ):
                    # There is at least one missing data point
                    memory_overlap |= isinstance(
                        p_data.mask.base, numpy_ndarray
                    )

                extra_memory = memory_overlap
            # --- End: if

            p_dtype = p_data.dtype

            if not extra_memory:
                if config["func"] is not None:
                    # A conversion function will copy the data
                    extra_memory = True
                else:
                    p_units = self.Units
                    units = config["units"]
                    if (
                        not p_units.equals(units)
                        and bool(p_units) is bool(units)
                        and not (
                            p_data.flags["C_CONTIGUOUS"]
                            and p_dtype.kind == "f"
                        )
                    ):
                        # Units conversion cannot be done in place
                        extra_memory = True

            # ------------------------------------------------------------
            # Extra memory is required if the dtype needs changing
            # ------------------------------------------------------------
            if not extra_memory:
                dtype = config["dtype"]
                if dtype is not None and dtype != p_data.dtype:
                    extra_memory = True
    # --- End: if

    # ------------------------------------------------------------
    # Amount of extra memory (in bytes) required to access the
    # array
    # ------------------------------------------------------------
    return self.nbytes if extra_memory else 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allocated_memory(self):\n return self._allocated_memory", "def memory(self):\r\n return self._memory", "def __len__(self):\n\t\treturn len(self.memory)", "def __len__(self):\r\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n...
[ "0.6821264", "0.6811845", "0.66922146", "0.66735834", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", ...
0.73394907
0
Open the partition prior to getting its array.
def open(self, config):
    """Open the partition prior to getting its array.

    Stores a copy of *config* on the partition (as ``self.config``),
    augmented with two derived keys:

    * ``'unique_subarray'``: whether this partition appears to hold
      the only reference to its subarray.
    * ``'extra_memory'``: the extra memory, in bytes, needed to
      access the array (see `extra_memory`).

    :Parameters:

        config: `dict`
            The configuration for conforming the partition's array.

    :Returns:

        `dict`
            The augmented configuration stored on the partition.

    """
    # refcount <= 2 means the only references are self._subarray and
    # the temporary one held by getrefcount itself, i.e. no other
    # partition shares this subarray
    unique_subarray = getrefcount(self._subarray) <= 2

    # Copy so the caller's dict is not mutated
    config = config.copy()
    config["unique_subarray"] = unique_subarray

    self.config = config

    if config.get("auxiliary_mask"):
        self._configure_auxiliary_mask(config["auxiliary_mask"])

    # Must run after self.config is set: extra_memory reads it
    self.config["extra_memory"] = self.extra_memory()

    self._in_place_changes = True
    self.masked = True

    # Discard any stale output from a previous conform
    if hasattr(self, "output"):
        del self.output

    return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open(self):\n return self.open_array", "def partition_book(self):\n ...", "def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n pre...
[ "0.5840582", "0.5428586", "0.5294434", "0.52601844", "0.51751643", "0.50962955", "0.50962955", "0.509415", "0.5024122", "0.5011268", "0.49543542", "0.4883474", "0.48718578", "0.48603144", "0.48522878", "0.48517695", "0.4839195", "0.48225853", "0.48027864", "0.48000965", "0.47...
0.0
-1